From 2160177a3b7b18832093f622c071c7ec010a382f Mon Sep 17 00:00:00 2001
From: Eduardo Silva
Date: Thu, 4 Jul 2024 10:54:14 -0600
Subject: [PATCH] lib: librdkafka: upgrade to v2.4.0

Signed-off-by: Eduardo Silva
---
 cmake/libraries.cmake | 2 +-
 .../tests/interactive_broker_version.py | 384 ----
 lib/librdkafka-2.3.0/tests/requirements.txt | 2 -
 .../.clang-format-cpp | 0
 .../.dir-locals.el | 0
 .../.formatignore | 0
 .../.gdbmacros | 0
 .../.github/ISSUE_TEMPLATE | 0
 .../.gitignore | 0
 .../.semaphore/project.yml | 0
 .../.semaphore/project_public.yml | 0
 .../.semaphore/semaphore.yml | 14 +-
 .../CHANGELOG.md | 108 +
 .../CMakeLists.txt | 0
 .../CODE_OF_CONDUCT.md | 0
 .../CONFIGURATION.md | 4 +-
 .../CONTRIBUTING.md | 0
 .../Doxyfile | 0
 .../INTRODUCTION.md | 178 +-
 .../LICENSE | 0
 .../LICENSE.cjson | 0
 .../LICENSE.crc32c | 0
 .../LICENSE.fnv1a | 0
 .../LICENSE.hdrhistogram | 0
 .../LICENSE.lz4 | 0
 .../LICENSE.murmur2 | 0
 .../LICENSE.pycrc | 0
 .../LICENSE.queue | 0
 .../LICENSE.regexp | 0
 .../LICENSE.snappy | 0
 .../LICENSE.tinycthread | 0
 .../LICENSE.wingetopt | 0
 .../LICENSES.txt | 0
 .../Makefile | 2 +-
 .../README.md | 0
 .../README.win32 | 0
 .../STATISTICS.md | 0
 .../configure | 0
 .../configure.self | 0
 .../debian/.gitignore | 0
 .../debian/changelog | 0
 .../debian/compat | 0
 .../debian/control | 0
 .../debian/copyright | 0
 .../debian/gbp.conf | 0
 .../debian/librdkafka++1.install | 0
 .../debian/librdkafka-dev.examples | 0
 .../debian/librdkafka-dev.install | 0
 .../debian/librdkafka1.docs | 0
 .../debian/librdkafka1.install | 0
 .../debian/librdkafka1.symbols | 0
 .../debian/rules | 0
 .../debian/source/format | 0
 .../debian/watch | 0
 .../dev-conf.sh | 0
 .../examples/.gitignore | 0
 .../examples/CMakeLists.txt | 0
 .../examples/Makefile | 0
 .../examples/README.md | 0
 .../examples/alter_consumer_group_offsets.c | 0
 .../examples/consumer.c | 2 +-
 .../examples/delete_records.c | 0
 .../examples/describe_cluster.c | 0
 .../examples/describe_consumer_groups.c | 0
 .../examples/describe_topics.c | 0
 .../examples/globals.json | 0
 .../examples/idempotent_producer.c | 0
 .../examples/incremental_alter_configs.c | 0
 .../examples/kafkatest_verifiable_client.cpp | 0
 .../examples/list_consumer_group_offsets.c | 0
 .../examples/list_consumer_groups.c | 0
 .../examples/list_offsets.c | 0
 .../examples/misc.c | 0
 .../examples/openssl_engine_example.cpp | 0
 .../examples/producer.c | 0
 .../examples/producer.cpp | 0
 .../rdkafka_complex_consumer_example.c | 0
 .../rdkafka_complex_consumer_example.cpp | 0
 .../examples/rdkafka_consume_batch.cpp | 0
 .../examples/rdkafka_example.c | 0
 .../examples/rdkafka_example.cpp | 0
 .../examples/rdkafka_performance.c | 0
 .../examples/transactions-older-broker.c | 0
 .../examples/transactions.c | 0
 .../examples/user_scram.c | 0
 .../examples/win_ssl_cert_store.cpp | 0
 .../lds-gen.py | 0
 .../mainpage.doxy | 0
 .../mklove/.gitignore | 0
 .../mklove/Makefile.base | 0
 .../mklove/modules/configure.atomics | 0
 .../mklove/modules/configure.base | 2 +-
 .../mklove/modules/configure.builtin | 0
 .../mklove/modules/configure.cc | 0
 .../mklove/modules/configure.cxx | 0
 .../mklove/modules/configure.fileversion | 0
 .../mklove/modules/configure.gitversion | 0
 .../mklove/modules/configure.good_cflags | 0
 .../mklove/modules/configure.host | 0
 .../mklove/modules/configure.lib | 0
 .../mklove/modules/configure.libcurl | 0
 .../mklove/modules/configure.libsasl2 | 0
 .../mklove/modules/configure.libssl | 4 +-
 .../mklove/modules/configure.libzstd | 0
 .../mklove/modules/configure.parseversion | 0
 .../mklove/modules/configure.pic | 0
 .../mklove/modules/configure.socket | 0
 .../mklove/modules/configure.zlib | 0
 .../mklove/modules/patches/README.md | 0
 ...ibcurl.0000-no-runtime-linking-check.patch | 0
 ...osx-rand-include-fix-OpenSSL-PR16409.patch | 0
 .../packaging/RELEASE.md | 0
 .../packaging/alpine/build-alpine.sh | 0
 .../packaging/archlinux/PKGBUILD | 0
 .../packaging/cmake/Config.cmake.in | 0
 .../packaging/cmake/Modules/FindLZ4.cmake | 0
 .../packaging/cmake/Modules/FindZSTD.cmake | 0
 .../packaging/cmake/Modules/LICENSE.FindZstd | 0
 .../packaging/cmake/README.md | 0
 .../packaging/cmake/config.h.in | 0
 .../packaging/cmake/parseversion.cmake | 0
 .../packaging/cmake/rdkafka.pc.in | 0
 .../cmake/try_compile/atomic_32_test.c | 0
 .../cmake/try_compile/atomic_64_test.c | 0
 .../cmake/try_compile/c11threads_test.c | 0
 .../cmake/try_compile/crc32c_hw_test.c | 0
 .../packaging/cmake/try_compile/dlopen_test.c | 0
 .../cmake/try_compile/libsasl2_test.c | 0
 .../try_compile/pthread_setname_darwin_test.c | 0
 .../pthread_setname_freebsd_test.c | 0
 .../try_compile/pthread_setname_gnu_test.c | 0
 .../packaging/cmake/try_compile/rand_r_test.c | 0
 .../cmake/try_compile/rdkafka_setup.cmake | 0
 .../packaging/cmake/try_compile/regex_test.c | 0
 .../cmake/try_compile/strndup_test.c | 0
 .../cmake/try_compile/sync_32_test.c | 0
 .../cmake/try_compile/sync_64_test.c | 0
 .../packaging/cp/README.md | 0
 .../packaging/cp/check_features.c | 0
 .../packaging/cp/verify-deb.sh | 0
 .../packaging/cp/verify-packages.sh | 0
 .../packaging/cp/verify-rpm.sh | 0
 .../packaging/debian/.gitignore | 0
 .../packaging/debian/changelog | 0
 .../packaging/debian/compat | 0
 .../packaging/debian/control | 0
 .../packaging/debian/copyright | 0
 .../packaging/debian/docs | 0
 .../packaging/debian/gbp.conf | 0
 .../packaging/debian/librdkafka-dev.dirs | 0
 .../packaging/debian/librdkafka-dev.examples | 0
 .../packaging/debian/librdkafka-dev.install | 0
 .../packaging/debian/librdkafka-dev.substvars | 0
 .../packaging/debian/librdkafka.dsc | 0
 .../debian/librdkafka1-dbg.substvars | 0
 .../packaging/debian/librdkafka1.dirs | 0
 .../packaging/debian/librdkafka1.install | 0
 .../debian/librdkafka1.postinst.debhelper | 0
 .../debian/librdkafka1.postrm.debhelper | 0
 .../packaging/debian/librdkafka1.symbols | 0
 .../packaging/debian/rules | 0
 .../packaging/debian/source/format | 0
 .../packaging/debian/watch | 0
 .../packaging/get_version.py | 0
 .../packaging/homebrew/README.md | 0
 .../packaging/homebrew/brew-update-pr.sh | 0
 .../configure-build-msys2-mingw-static.sh | 0
 .../mingw-w64/configure-build-msys2-mingw.sh | 0
 .../packaging/mingw-w64/run-tests.sh | 0
 .../packaging/mingw-w64/semaphoreci-build.sh | 0
 .../mingw-w64/travis-before-install.sh | 0
 .../packaging/nuget/.gitignore | 0
 .../packaging/nuget/README.md | 0
 .../packaging/nuget/artifact.py | 0
 .../packaging/nuget/cleanup-s3.py | 0
 .../msvcr120.zip | Bin
 .../msvcr140.zip | Bin
 .../msvcr120.zip | Bin
 .../msvcr140.zip | Bin
 .../packaging/nuget/nuget.sh | 0
 .../packaging/nuget/nugetpackage.py | 0
 .../packaging/nuget/packaging.py | 2 +-
 .../packaging/nuget/push-to-nuget.sh | 0
 .../packaging/nuget/release.py | 0
 .../packaging/nuget/requirements.txt | 0
 .../packaging/nuget/staticpackage.py | 0
 .../nuget/templates/librdkafka.redist.nuspec | 0
 .../nuget/templates/librdkafka.redist.props | 0
 .../nuget/templates/librdkafka.redist.targets | 0
 .../packaging/nuget/zfile/__init__.py | 0
 .../packaging/nuget/zfile/zfile.py | 0
 .../packaging/rpm/.gitignore | 0
 .../packaging/rpm/Makefile | 0
 .../packaging/rpm/README.md | 0
 .../packaging/rpm/el7-x86_64.cfg | 0
 .../packaging/rpm/librdkafka.spec | 0
 .../packaging/rpm/mock-on-docker.sh | 0
 .../packaging/rpm/tests/.gitignore | 0
 .../packaging/rpm/tests/Makefile | 0
 .../packaging/rpm/tests/README.md | 0
 .../packaging/rpm/tests/run-test.sh | 0
 .../packaging/rpm/tests/test-on-docker.sh | 0
 .../packaging/rpm/tests/test.c | 0
 .../packaging/rpm/tests/test.cpp | 0
 .../packaging/tools/build-deb-package.sh | 0
 .../packaging/tools/build-debian.sh | 0
 .../packaging/tools/build-manylinux.sh | 0
 .../tools/build-release-artifacts.sh | 0
 .../packaging/tools/distro-build.sh | 0
 .../packaging/tools/gh-release-checksums.py | 0
 .../packaging/tools/rdutcoverage.sh | 0
 .../packaging/tools/requirements.txt | 0
 .../packaging/tools/style-format.sh | 0
 .../service.yml | 0
 .../src-cpp/CMakeLists.txt | 0
 .../src-cpp/ConfImpl.cpp | 0
 .../src-cpp/ConsumerImpl.cpp | 0
 .../src-cpp/HandleImpl.cpp | 0
 .../src-cpp/HeadersImpl.cpp | 0
 .../src-cpp/KafkaConsumerImpl.cpp | 0
 .../src-cpp/Makefile | 0
 .../src-cpp/MessageImpl.cpp | 0
 .../src-cpp/MetadataImpl.cpp | 0
 .../src-cpp/ProducerImpl.cpp | 0
 .../src-cpp/QueueImpl.cpp | 0
 .../src-cpp/README.md | 0
 .../src-cpp/RdKafka.cpp | 0
 .../src-cpp/TopicImpl.cpp | 0
 .../src-cpp/TopicPartitionImpl.cpp | 0
 .../src-cpp/rdkafkacpp.h | 2 +-
 .../src-cpp/rdkafkacpp_int.h | 0
 .../src/CMakeLists.txt | 0
 .../src/Makefile | 0
 .../src/cJSON.c | 0
 .../src/cJSON.h | 0
 .../src/crc32c.c | 0
 .../src/crc32c.h | 0
 .../src/generate_proto.sh | 0
 .../src/librdkafka_cgrp_synch.png | Bin
 .../src/lz4.c | 0
 .../src/lz4.h | 0
 .../src/lz4frame.c | 0
 .../src/lz4frame.h | 0
 .../src/lz4frame_static.h | 0
 .../src/lz4hc.c | 0
 .../src/lz4hc.h | 0
 .../src/queue.h | 0
 .../src/rd.h | 0
 .../src/rdaddr.c | 0
 .../src/rdaddr.h | 0
 .../src/rdatomic.h | 0
 .../src/rdavg.h | 0
 .../src/rdavl.c | 0
 .../src/rdavl.h | 0
 .../src/rdbase64.c | 0
 .../src/rdbase64.h | 0
 .../src/rdbuf.c | 0
 .../src/rdbuf.h | 0
 .../src/rdcrc32.c | 0
 .../src/rdcrc32.h | 0
 .../src/rddl.c | 0
 .../src/rddl.h | 0
 .../src/rdendian.h | 0
 .../src/rdfloat.h | 0
 .../src/rdfnv1a.c | 0
 .../src/rdfnv1a.h | 0
 .../src/rdgz.c | 0
 .../src/rdgz.h | 0
 .../src/rdhdrhistogram.c | 0
 .../src/rdhdrhistogram.h | 0
 .../src/rdhttp.c | 0
 .../src/rdhttp.h | 0
 .../src/rdinterval.h | 0
 .../src/rdkafka.c | 166 +-
 .../src/rdkafka.h | 57 +-
 .../src/rdkafka_admin.c | 17 +-
 .../src/rdkafka_admin.h | 0
 .../src/rdkafka_assignment.c | 69 +-
 .../src/rdkafka_assignment.h | 0
 .../src/rdkafka_assignor.c | 5 +-
 .../src/rdkafka_assignor.h | 0
 .../src/rdkafka_aux.c | 0
 .../src/rdkafka_aux.h | 0
 .../src/rdkafka_background.c | 0
 .../src/rdkafka_broker.c | 16 +-
 .../src/rdkafka_broker.h | 10 +-
 .../src/rdkafka_buf.c | 2 +
 .../src/rdkafka_buf.h | 6 +-
 .../src/rdkafka_cert.c | 0
 .../src/rdkafka_cert.h | 0
 .../src/rdkafka_cgrp.c | 1949 ++++++++++++++---
 .../src/rdkafka_cgrp.h | 59 +-
 .../src/rdkafka_conf.c | 19 +-
 .../src/rdkafka_conf.h | 8 +
 .../src/rdkafka_confval.h | 0
 .../src/rdkafka_coord.c | 0
 .../src/rdkafka_coord.h | 0
 .../src/rdkafka_error.c | 0
 .../src/rdkafka_error.h | 0
 .../src/rdkafka_event.c | 0
 .../src/rdkafka_event.h | 0
 .../src/rdkafka_feature.c | 0
 .../src/rdkafka_feature.h | 0
 .../src/rdkafka_fetcher.c | 0
 .../src/rdkafka_fetcher.h | 0
 .../src/rdkafka_header.c | 0
 .../src/rdkafka_header.h | 0
 .../src/rdkafka_idempotence.c | 0
 .../src/rdkafka_idempotence.h | 0
 .../src/rdkafka_int.h | 37 +-
 .../src/rdkafka_interceptor.c | 0
 .../src/rdkafka_interceptor.h | 0
 .../src/rdkafka_lz4.c | 0
 .../src/rdkafka_lz4.h | 0
 .../src/rdkafka_metadata.c | 175 +-
 .../src/rdkafka_metadata.h | 19 +-
 .../src/rdkafka_metadata_cache.c | 205 +-
 .../src/rdkafka_mock.c | 110 +-
 .../src/rdkafka_mock.h | 26 +
 .../src/rdkafka_mock_cgrp.c | 0
 .../src/rdkafka_mock_handlers.c | 66 +-
 .../src/rdkafka_mock_int.h | 27 +
 .../src/rdkafka_msg.c | 47 +
 .../src/rdkafka_msg.h | 31 +
 .../src/rdkafka_msgbatch.h | 0
 .../src/rdkafka_msgset.h | 0
 .../src/rdkafka_msgset_reader.c | 0
 .../src/rdkafka_msgset_writer.c | 3 +-
 .../src/rdkafka_offset.c | 2 -
 .../src/rdkafka_offset.h | 0
 .../src/rdkafka_op.c | 2 +
 .../src/rdkafka_op.h | 13 +-
 .../src/rdkafka_partition.c | 402 +++-
 .../src/rdkafka_partition.h | 91 +-
 .../src/rdkafka_pattern.c | 0
 .../src/rdkafka_pattern.h | 0
 .../src/rdkafka_plugin.c | 0
 .../src/rdkafka_plugin.h | 0
 .../src/rdkafka_proto.h | 85 +-
 .../src/rdkafka_protocol.h | 3 +-
 .../src/rdkafka_queue.c | 0
 .../src/rdkafka_queue.h | 0
 .../src/rdkafka_range_assignor.c | 0
 .../src/rdkafka_request.c | 883 +++++---
 .../src/rdkafka_request.h | 33 +-
 .../src/rdkafka_roundrobin_assignor.c | 0
 .../src/rdkafka_sasl.c | 0
 .../src/rdkafka_sasl.h | 0
 .../src/rdkafka_sasl_cyrus.c | 0
 .../src/rdkafka_sasl_int.h | 0
 .../src/rdkafka_sasl_oauthbearer.c | 0
 .../src/rdkafka_sasl_oauthbearer.h | 0
 .../src/rdkafka_sasl_oauthbearer_oidc.c | 0
 .../src/rdkafka_sasl_oauthbearer_oidc.h | 0
 .../src/rdkafka_sasl_plain.c | 0
 .../src/rdkafka_sasl_scram.c | 0
 .../src/rdkafka_sasl_win32.c | 0
 .../src/rdkafka_ssl.c | 3 +-
 .../src/rdkafka_ssl.h | 0
 .../src/rdkafka_sticky_assignor.c | 7 +-
 .../src/rdkafka_subscription.c | 0
 .../src/rdkafka_timer.c | 0
 .../src/rdkafka_timer.h | 0
 .../src/rdkafka_topic.c | 137 +-
 .../src/rdkafka_topic.h | 11 +
 .../src/rdkafka_transport.c | 0
 .../src/rdkafka_transport.h | 0
 .../src/rdkafka_transport_int.h | 0
 .../src/rdkafka_txnmgr.c | 6 +-
 .../src/rdkafka_txnmgr.h | 0
 .../src/rdkafka_zstd.c | 0
 .../src/rdkafka_zstd.h | 0
 .../src/rdlist.c | 0
 .../src/rdlist.h | 0
 .../src/rdlog.c | 0
 .../src/rdlog.h | 0
 .../src/rdmap.c | 16 +
 .../src/rdmap.h | 5 +
 .../src/rdmurmur2.c | 0
 .../src/rdmurmur2.h | 0
 .../src/rdports.c | 0
 .../src/rdports.h | 0
 .../src/rdposix.h | 0
 .../src/rdrand.c | 0
 .../src/rdrand.h | 0
 .../src/rdregex.c | 0
 .../src/rdregex.h | 0
 .../src/rdsignal.h | 0
 .../src/rdstring.c | 0
 .../src/rdstring.h | 0
 .../src/rdsysqueue.h | 0
 .../src/rdtime.h | 0
 .../src/rdtypes.h | 0
 .../src/rdunittest.c | 0
 .../src/rdunittest.h | 0
 .../src/rdvarint.c | 0
 .../src/rdvarint.h | 0
 .../src/rdwin32.h | 0
 .../src/rdxxhash.c | 0
 .../src/rdxxhash.h | 0
 .../src/regexp.c | 0
 .../src/regexp.h | 0
 .../src/snappy.c | 0
 .../src/snappy.h | 0
 .../src/snappy_compat.h | 0
 .../src/statistics_schema.json | 0
 .../src/tinycthread.c | 0
 .../src/tinycthread.h | 0
 .../src/tinycthread_extra.c | 0
 .../src/tinycthread_extra.h | 0
 .../src/win32_config.h | 0
 .../tests/.gitignore | 0
 .../tests/0000-unittests.c | 0
 .../tests/0001-multiobj.c | 0
 .../tests/0002-unkpart.c | 0
 .../tests/0003-msgmaxsize.c | 0
 .../tests/0004-conf.c | 0
 .../tests/0005-order.c | 0
 .../tests/0006-symbols.c | 0
 .../tests/0007-autotopic.c | 0
 .../tests/0008-reqacks.c | 0
 .../tests/0009-mock_cluster.c | 5 +-
 .../tests/0011-produce_batch.c | 190 +-
 .../tests/0012-produce_consume.c | 0
 .../tests/0013-null-msgs.c | 0
 .../tests/0014-reconsume-191.c | 0
 .../tests/0015-offset_seeks.c | 0
 .../tests/0016-client_swname.c | 2 +-
 .../tests/0017-compression.c | 0
 .../tests/0018-cgrp_term.c | 3 +-
 .../tests/0019-list_groups.c | 0
 .../tests/0020-destroy_hang.c | 0
 .../tests/0021-rkt_destroy.c | 0
 .../tests/0022-consume_batch.c | 4 +-
 .../tests/0025-timers.c | 0
 .../tests/0026-consume_pause.c | 8 +
 .../tests/0028-long_topicnames.c | 0
 .../tests/0029-assign_offset.c | 6 +
 .../tests/0030-offset_commit.c | 0
 .../tests/0031-get_offsets.c | 10 +-
 .../tests/0033-regex_subscribe.c | 7 +-
 .../tests/0034-offset_reset.c | 0
 .../tests/0035-api_version.c | 0
 .../tests/0036-partial_fetch.c | 0
 .../tests/0037-destroy_hang_local.c | 0
 .../tests/0038-performance.c | 0
 .../tests/0039-event.c | 0
 .../tests/0040-io_event.c | 0
 .../tests/0041-fetch_max_bytes.c | 0
 .../tests/0042-many_topics.c | 0
 .../tests/0043-no_connection.c | 0
 .../tests/0044-partition_cnt.c | 0
 .../tests/0045-subscribe_update.c | 5 +-
 .../tests/0046-rkt_cache.c | 0
 .../tests/0047-partial_buf_tmout.c | 0
 .../tests/0048-partitioner.c | 0
 .../tests/0049-consume_conn_close.c | 0
 .../tests/0050-subscribe_adds.c | 24 +-
 .../tests/0051-assign_adds.c | 0
 .../tests/0052-msg_timestamps.c | 0
 .../tests/0053-stats_cb.cpp | 0
 .../tests/0054-offset_time.cpp | 0
 .../tests/0055-producer_latency.c | 0
 .../tests/0056-balanced_group_mt.c | 0
 .../tests/0057-invalid_topic.cpp | 0
 .../tests/0058-log.cpp | 0
 .../tests/0059-bsearch.cpp | 0
 .../tests/0060-op_prio.cpp | 0
 .../tests/0061-consumer_lag.cpp | 0
 .../tests/0062-stats_event.c | 0
 .../tests/0063-clusterid.cpp | 0
 .../tests/0064-interceptors.c | 0
 .../tests/0065-yield.cpp | 0
 .../tests/0066-plugins.cpp | 0
 .../tests/0067-empty_topic.cpp | 0
 .../tests/0068-produce_timeout.c | 0
 .../tests/0069-consumer_add_parts.c | 0
 .../tests/0070-null_empty.cpp | 0
 .../tests/0072-headers_ut.c | 0
 .../tests/0073-headers.c | 0
 .../tests/0074-producev.c | 0
 .../tests/0075-retry.c | 0
 .../tests/0076-produce_retry.c | 100 +
 .../tests/0077-compaction.c | 0
 .../tests/0078-c_from_cpp.cpp | 0
 .../tests/0079-fork.c | 0
 .../tests/0080-admin_ut.c | 0
 .../tests/0081-admin.c | 0
 .../tests/0082-fetch_max_bytes.cpp | 0
 .../tests/0083-cb_event.c | 0
 .../tests/0084-destroy_flags.c | 0
 .../tests/0085-headers.cpp | 0
 .../tests/0086-purge.c | 0
 .../tests/0088-produce_metadata_timeout.c | 0
 .../tests/0089-max_poll_interval.c | 0
 .../tests/0090-idempotence.c | 0
 .../tests/0091-max_poll_interval_timeout.c | 0
 .../tests/0092-mixed_msgver.c | 0
 .../tests/0093-holb.c | 0
 .../tests/0094-idempotence_msg_timeout.c | 0
 .../tests/0095-all_brokers_down.cpp | 0
 .../tests/0097-ssl_verify.cpp | 0
 .../tests/0098-consumer-txn.cpp | 0
 .../tests/0099-commit_metadata.c | 0
 .../tests/0100-thread_interceptors.cpp | 0
 .../tests/0101-fetch-from-follower.cpp | 0
 .../tests/0102-static_group_rebalance.c | 0
 .../tests/0103-transactions.c | 0
 .../tests/0104-fetch_from_follower_mock.c | 5 +-
 .../tests/0105-transactions_mock.c | 5 +-
 .../tests/0106-cgrp_sess_timeout.c | 5 +-
 .../tests/0107-topic_recreate.c | 0
 .../tests/0109-auto_create_topics.cpp | 0
 .../tests/0110-batch_size.cpp | 0
 .../tests/0111-delay_create_topics.cpp | 0
 .../tests/0112-assign_unknown_part.c | 0
 .../tests/0113-cooperative_rebalance.cpp | 449 ++--
 .../tests/0114-sticky_partitioning.cpp | 0
 .../tests/0115-producer_auth.cpp | 0
 .../tests/0116-kafkaconsumer_close.cpp | 0
 .../tests/0117-mock_errors.c | 5 +-
 .../tests/0118-commit_rebalance.c | 0
 .../tests/0119-consumer_auth.cpp | 0
 .../tests/0120-asymmetric_subscription.c | 5 +-
 .../tests/0121-clusterid.c | 5 +-
 .../0122-buffer_cleaning_after_rebalance.c | 0
 .../tests/0123-connections_max_idle.c | 0
 .../tests/0124-openssl_invalid_engine.c | 0
 .../tests/0125-immediate_flush.c | 5 +-
 .../tests/0126-oauthbearer_oidc.c | 0
 .../tests/0127-fetch_queue_backoff.cpp | 14 +-
 .../tests/0128-sasl_callback_queue.cpp | 0
 .../tests/0129-fetch_aborted_msgs.c | 0
 .../tests/0130-store_offsets.c | 0
 .../tests/0131-connect_timeout.c | 0
 .../tests/0132-strategy_ordering.c | 0
 .../tests/0133-ssl_keys.c | 0
 .../tests/0134-ssl_provider.c | 0
 .../tests/0135-sasl_credentials.cpp | 0
 .../tests/0136-resolve_cb.c | 0
 .../tests/0137-barrier_batch_consume.c | 0
 .../tests/0138-admin_mock.c | 5 +-
 .../tests/0139-offset_validation_mock.c | 5 +-
 .../tests/0140-commit_metadata.cpp | 0
 .../tests/0142-reauthentication.c | 0
 .../tests/0143-exponential_backoff_mock.c | 24 +-
 .../tests/0144-idempotence_mock.c | 5 +-
 .../tests/0145-pause_resume_mock.c | 119 +
 .../tests/0146-metadata_mock.c | 272 +++
 .../tests/1000-unktopic.c | 0
 .../tests/8000-idle.cpp | 0
 .../8001-fetch_from_follower_mock_manual.c | 5 +-
 .../tests/CMakeLists.txt | 2 +
 .../tests/LibrdkafkaTestApp.py | 34 +-
 .../tests/Makefile | 0
 .../tests/README.md | 0
 .../tests/autotest.sh | 0
 .../tests/backtrace.gdb | 0
 .../tests/broker_version_tests.py | 30 +-
 .../tests/buildbox.sh | 0
 .../tests/cleanup-checker-tests.sh | 0
 .../tests/cluster_testing.py | 20 +-
 .../tests/delete-test-topics.sh | 0
 .../tests/fixtures/ssl/.gitignore | 0
 .../tests/fixtures/ssl/Makefile | 0
 .../tests/fixtures/ssl/README.md | 0
 .../tests/fixtures/ssl/client.keystore.p12 | Bin
 .../fixtures/ssl/client2.certificate.pem | 0
 .../tests/fixtures/ssl/client2.key | 0
 .../tests/fixtures/ssl/create_keys.sh | 0
 .../tests/fuzzers/.gitignore | 0
 .../tests/fuzzers/Makefile | 0
 .../tests/fuzzers/README.md | 0
 .../tests/fuzzers/fuzz_regex.c | 0
 .../tests/fuzzers/helpers.h | 0
 .../tests/gen-ssl-certs.sh | 0
 .../tests/interactive_broker_version.py | 170 ++
 .../tests/interceptor_test/.gitignore | 0
 .../tests/interceptor_test/CMakeLists.txt | 0
 .../tests/interceptor_test/Makefile | 0
 .../tests/interceptor_test/interceptor_test.c | 0
 .../tests/interceptor_test/interceptor_test.h | 0
 .../tests/java/.gitignore | 0
 .../tests/java/IncrementalRebalanceCli.java | 0
 .../tests/java/Makefile | 0
 .../tests/java/Murmur2Cli.java | 0
 .../tests/java/README.md | 0
 .../tests/java/TransactionProducerCli.java | 0
 .../tests/java/run-class.sh | 0
 .../tests/librdkafka.suppressions | 0
 .../tests/lz4_manual_test.sh | 0
 .../tests/multi-broker-version-test.sh | 0
 .../tests/parse-refcnt.sh | 0
 .../tests/performance_plot.py | 0
 .../tests/plugin_test/Makefile | 0
 .../tests/plugin_test/plugin_test.c | 0
 lib/librdkafka-2.4.0/tests/requirements.txt | 2 +
 .../tests/run-consumer-tests.sh | 0
 .../tests/run-producer-tests.sh | 0
 .../tests/run-test.sh | 0
 .../tests/rusage.c | 0
 .../tests/sasl_test.py | 59 +-
 .../tests/scenarios/README.md | 0
 .../tests/scenarios/ak23.json | 0
 .../tests/scenarios/default.json | 0
 .../tests/scenarios/noautocreate.json | 0
 .../tests/sockem.c | 0
 .../tests/sockem.h | 0
 .../tests/sockem_ctrl.c | 0
 .../tests/sockem_ctrl.h | 0
 .../tests/test.c | 120 +-
 .../tests/test.conf.example | 0
 .../tests/test.h | 28 +-
 .../tests/testcpp.cpp | 0
 .../tests/testcpp.h | 0
 .../tests/testshared.h | 0
 .../tests/tools/README.md | 0
 .../tests/tools/stats/README.md | 0
 .../tests/tools/stats/filter.jq | 0
 .../tests/tools/stats/graph.py | 0
 .../tests/tools/stats/requirements.txt | 0
 .../tests/tools/stats/to_csv.py | 0
 .../tests/until-fail.sh | 0
 .../tests/xxxx-assign_partition.c | 0
 .../tests/xxxx-metadata.cpp | 0
 .../vcpkg.json | 2 +-
 .../win32/.gitignore | 0
 .../win32/README.md | 0
 .../win32/build-package.bat | 0
 .../win32/build.bat | 0
 .../win32/common.vcxproj | 0
 .../win32/install-openssl.ps1 | 0
 .../interceptor_test/interceptor_test.vcxproj | 0
 .../win32/librdkafka.autopkg.template | 0
 .../win32/librdkafka.master.testing.targets | 0
 .../win32/librdkafka.sln | 0
 .../win32/librdkafka.vcxproj | 0
 .../win32/librdkafkacpp/librdkafkacpp.vcxproj | 0
 .../win32/msbuild.ps1 | 0
 .../openssl_engine_example.vcxproj | 0
 .../win32/package-zip.ps1 | 0
 .../win32/packages/repositories.config | 0
 .../win32/push-package.bat | 0
 ...kafka_complex_consumer_example_cpp.vcxproj | 0
 .../rdkafka_example/rdkafka_example.vcxproj | 0
 .../rdkafka_performance.vcxproj | 0
 .../win32/setup-msys2.ps1 | 0
 .../win32/setup-vcpkg.ps1 | 3 +-
 .../win32/tests/.gitignore | 0
 .../win32/tests/test.conf.example | 0
 .../win32/tests/tests.vcxproj | 2 +
 .../win_ssl_cert_store.vcxproj | 0
 .../win32/wingetopt.c | 0
 .../win32/wingetopt.h | 0
 .../win32/wintime.h | 0
 655 files changed, 5598 insertions(+), 1705 deletions(-)
 delete mode 100755 lib/librdkafka-2.3.0/tests/interactive_broker_version.py
 delete mode 100644 lib/librdkafka-2.3.0/tests/requirements.txt
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.clang-format-cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.dir-locals.el (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.formatignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.gdbmacros (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.github/ISSUE_TEMPLATE (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.semaphore/project.yml (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.semaphore/project_public.yml (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/.semaphore/semaphore.yml (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/CHANGELOG.md (92%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/CMakeLists.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/CODE_OF_CONDUCT.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/CONFIGURATION.md (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/CONTRIBUTING.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/Doxyfile (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/INTRODUCTION.md (92%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.cjson (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.crc32c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.fnv1a (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.hdrhistogram (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.lz4 (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.murmur2 (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.pycrc (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.queue (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.regexp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.snappy (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.tinycthread (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSE.wingetopt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/LICENSES.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/Makefile (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/README.win32 (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/STATISTICS.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/configure (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/configure.self (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/changelog (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/compat (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/control (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/copyright (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/gbp.conf (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/librdkafka++1.install (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/librdkafka-dev.examples (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/librdkafka-dev.install (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/librdkafka1.docs (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/librdkafka1.install (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/librdkafka1.symbols (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/rules (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/source/format (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/debian/watch (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/dev-conf.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/CMakeLists.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/Makefile (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/alter_consumer_group_offsets.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/consumer.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/delete_records.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/describe_cluster.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/describe_consumer_groups.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/describe_topics.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/globals.json (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/idempotent_producer.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/incremental_alter_configs.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/kafkatest_verifiable_client.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/list_consumer_group_offsets.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/list_consumer_groups.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/list_offsets.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/misc.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/openssl_engine_example.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/producer.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/producer.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/rdkafka_complex_consumer_example.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/rdkafka_complex_consumer_example.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/rdkafka_consume_batch.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/rdkafka_example.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/rdkafka_example.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/rdkafka_performance.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/transactions-older-broker.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/transactions.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/user_scram.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/examples/win_ssl_cert_store.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/lds-gen.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mainpage.doxy (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/Makefile.base (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.atomics (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.base (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.builtin (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.cc (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.cxx (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.fileversion (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.gitversion (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.good_cflags (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.host (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.lib (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.libcurl (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.libsasl2 (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.libssl (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.libzstd (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.parseversion (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.pic (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.socket (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/configure.zlib (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/patches/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/RELEASE.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/alpine/build-alpine.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/archlinux/PKGBUILD (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/Config.cmake.in (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/Modules/FindLZ4.cmake (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/Modules/FindZSTD.cmake (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/Modules/LICENSE.FindZstd (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/config.h.in (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/parseversion.cmake (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/rdkafka.pc.in (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/atomic_32_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/atomic_64_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/c11threads_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/crc32c_hw_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/dlopen_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/libsasl2_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/pthread_setname_darwin_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/pthread_setname_freebsd_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/pthread_setname_gnu_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/rand_r_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/rdkafka_setup.cmake (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/regex_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/strndup_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/sync_32_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cmake/try_compile/sync_64_test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cp/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cp/check_features.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cp/verify-deb.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cp/verify-packages.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/cp/verify-rpm.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/changelog (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/compat (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/control (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/copyright (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/docs (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/gbp.conf (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka-dev.dirs (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka-dev.examples (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka-dev.install (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka-dev.substvars (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka.dsc (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka1-dbg.substvars (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka1.dirs (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka1.install (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka1.postinst.debhelper (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka1.postrm.debhelper (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/librdkafka1.symbols (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/rules (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/source/format (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/debian/watch (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/get_version.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/homebrew/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/homebrew/brew-update-pr.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/mingw-w64/configure-build-msys2-mingw-static.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/mingw-w64/configure-build-msys2-mingw.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/mingw-w64/run-tests.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/mingw-w64/semaphoreci-build.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/mingw-w64/travis-before-install.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/artifact.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/cleanup-s3.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/nuget.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/nugetpackage.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/packaging.py (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/push-to-nuget.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/release.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/requirements.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/staticpackage.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/templates/librdkafka.redist.nuspec (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/templates/librdkafka.redist.props (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/templates/librdkafka.redist.targets (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/zfile/__init__.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/nuget/zfile/zfile.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/Makefile (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/el7-x86_64.cfg (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/librdkafka.spec (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/mock-on-docker.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/Makefile (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/run-test.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/test-on-docker.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/test.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/rpm/tests/test.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/build-deb-package.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/build-debian.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/build-manylinux.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/build-release-artifacts.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/distro-build.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/gh-release-checksums.py (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/rdutcoverage.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/requirements.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/packaging/tools/style-format.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/service.yml (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/CMakeLists.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/ConfImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/ConsumerImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/HandleImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/HeadersImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/KafkaConsumerImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/Makefile (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/MessageImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/MetadataImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/ProducerImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/QueueImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/README.md (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/RdKafka.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/TopicImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/TopicPartitionImpl.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/rdkafkacpp.h (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src-cpp/rdkafkacpp_int.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/CMakeLists.txt (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/Makefile (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/cJSON.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/cJSON.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/crc32c.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/crc32c.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/generate_proto.sh (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/librdkafka_cgrp_synch.png (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4frame.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4frame.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4frame_static.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4hc.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/lz4hc.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/queue.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rd.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdaddr.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdaddr.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdatomic.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdavg.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdavl.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdavl.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdbase64.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdbase64.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdbuf.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdbuf.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdcrc32.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdcrc32.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rddl.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rddl.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdendian.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdfloat.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdfnv1a.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdfnv1a.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdgz.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdgz.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdhdrhistogram.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdhdrhistogram.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdhttp.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdhttp.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdinterval.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka.c (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka.h (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_admin.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_admin.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_assignment.c (93%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_assignment.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_assignor.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_assignor.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_aux.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_aux.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_background.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_broker.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_broker.h (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_buf.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_buf.h (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_cert.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_cert.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_cgrp.c (80%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_cgrp.h (85%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_conf.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_conf.h (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_confval.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_coord.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_coord.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_error.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_error.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_event.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_event.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_feature.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_feature.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_fetcher.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_fetcher.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_header.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_header.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_idempotence.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_idempotence.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_int.h (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_interceptor.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_interceptor.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_lz4.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_lz4.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_metadata.c (92%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_metadata.h (94%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_metadata_cache.c (81%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_mock.c (95%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_mock.h (93%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_mock_cgrp.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_mock_handlers.c (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_mock_int.h (95%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_msg.c (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_msg.h (94%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_msgbatch.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_msgset.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_msgset_reader.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_msgset_writer.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_offset.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_offset.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_op.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_op.h (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_partition.c (91%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_partition.h (93%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_pattern.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_pattern.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_plugin.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_plugin.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_proto.h (88%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_protocol.h (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_queue.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_queue.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_range_assignor.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_request.c (88%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_request.h (94%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_roundrobin_assignor.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_cyrus.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_int.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_oauthbearer.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_oauthbearer.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_oauthbearer_oidc.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_oauthbearer_oidc.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_plain.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_scram.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sasl_win32.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_ssl.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_ssl.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_sticky_assignor.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_subscription.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_timer.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_timer.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_topic.c (94%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_topic.h (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_transport.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_transport.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_transport_int.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_txnmgr.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_txnmgr.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_zstd.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdkafka_zstd.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdlist.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdlist.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdlog.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdlog.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdmap.c (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdmap.h (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdmurmur2.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdmurmur2.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdports.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdports.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdposix.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdrand.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdrand.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdregex.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdregex.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdsignal.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdstring.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdstring.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdsysqueue.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdtime.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdtypes.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdunittest.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdunittest.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdvarint.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdvarint.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdwin32.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdxxhash.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/rdxxhash.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/regexp.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/regexp.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/snappy.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/snappy.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/snappy_compat.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/statistics_schema.json (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/tinycthread.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/tinycthread.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/tinycthread_extra.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/tinycthread_extra.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/src/win32_config.h (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/.gitignore (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0000-unittests.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0001-multiobj.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0002-unkpart.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0003-msgmaxsize.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0004-conf.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0005-order.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0006-symbols.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0007-autotopic.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0008-reqacks.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0009-mock_cluster.c (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0011-produce_batch.c (75%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0012-produce_consume.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0013-null-msgs.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0014-reconsume-191.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0015-offset_seeks.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0016-client_swname.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0017-compression.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0018-cgrp_term.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0019-list_groups.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0020-destroy_hang.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0021-rkt_destroy.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0022-consume_batch.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0025-timers.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0026-consume_pause.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0028-long_topicnames.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0029-assign_offset.c (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0030-offset_commit.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0031-get_offsets.c (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0033-regex_subscribe.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0034-offset_reset.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0035-api_version.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0036-partial_fetch.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0037-destroy_hang_local.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0038-performance.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0039-event.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0040-io_event.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0041-fetch_max_bytes.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0042-many_topics.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0043-no_connection.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0044-partition_cnt.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0045-subscribe_update.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0046-rkt_cache.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0047-partial_buf_tmout.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0048-partitioner.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0049-consume_conn_close.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0050-subscribe_adds.c (88%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0051-assign_adds.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0052-msg_timestamps.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0053-stats_cb.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0054-offset_time.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0055-producer_latency.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0056-balanced_group_mt.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0057-invalid_topic.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0058-log.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0059-bsearch.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0060-op_prio.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0061-consumer_lag.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0062-stats_event.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0063-clusterid.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0064-interceptors.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0065-yield.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0066-plugins.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0067-empty_topic.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0068-produce_timeout.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0069-consumer_add_parts.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0070-null_empty.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0072-headers_ut.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0073-headers.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0074-producev.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0075-retry.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0076-produce_retry.c (78%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0077-compaction.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0078-c_from_cpp.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0079-fork.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0080-admin_ut.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0081-admin.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0082-fetch_max_bytes.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0083-cb_event.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0084-destroy_flags.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0085-headers.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0086-purge.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0088-produce_metadata_timeout.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0089-max_poll_interval.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0090-idempotence.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0091-max_poll_interval_timeout.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0092-mixed_msgver.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0093-holb.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0094-idempotence_msg_timeout.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0095-all_brokers_down.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0097-ssl_verify.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0098-consumer-txn.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0099-commit_metadata.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0100-thread_interceptors.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0101-fetch-from-follower.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0102-static_group_rebalance.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0103-transactions.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0104-fetch_from_follower_mock.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0105-transactions_mock.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0106-cgrp_sess_timeout.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0107-topic_recreate.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0109-auto_create_topics.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0110-batch_size.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0111-delay_create_topics.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0112-assign_unknown_part.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0113-cooperative_rebalance.cpp (87%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0114-sticky_partitioning.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0115-producer_auth.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0116-kafkaconsumer_close.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0117-mock_errors.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0118-commit_rebalance.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0119-consumer_auth.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0120-asymmetric_subscription.c (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0121-clusterid.c (96%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0122-buffer_cleaning_after_rebalance.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0123-connections_max_idle.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0124-openssl_invalid_engine.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0125-immediate_flush.c (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0126-oauthbearer_oidc.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0127-fetch_queue_backoff.cpp (94%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0128-sasl_callback_queue.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0129-fetch_aborted_msgs.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0130-store_offsets.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0131-connect_timeout.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0132-strategy_ordering.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0133-ssl_keys.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0134-ssl_provider.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0135-sasl_credentials.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0136-resolve_cb.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0137-barrier_batch_consume.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0138-admin_mock.c (98%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0139-offset_validation_mock.c (99%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0140-commit_metadata.cpp (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0142-reauthentication.c (100%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0143-exponential_backoff_mock.c (97%)
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/0144-idempotence_mock.c (99%)
 create mode 100644 lib/librdkafka-2.4.0/tests/0145-pause_resume_mock.c
 create mode 100644 lib/librdkafka-2.4.0/tests/0146-metadata_mock.c
 rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/1000-unktopic.c (100%)
rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/8000-idle.cpp (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/8001-fetch_from_follower_mock_manual.c (96%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/CMakeLists.txt (98%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/LibrdkafkaTestApp.py (95%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/Makefile (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/autotest.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/backtrace.gdb (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/broker_version_tests.py (91%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/buildbox.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/cleanup-checker-tests.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/cluster_testing.py (89%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/delete-test-topics.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/.gitignore (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/Makefile (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/client.keystore.p12 (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/client2.certificate.pem (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/client2.key (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fixtures/ssl/create_keys.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fuzzers/.gitignore (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fuzzers/Makefile (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fuzzers/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fuzzers/fuzz_regex.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/fuzzers/helpers.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/gen-ssl-certs.sh (100%) create mode 100755 lib/librdkafka-2.4.0/tests/interactive_broker_version.py rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/interceptor_test/.gitignore (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/interceptor_test/CMakeLists.txt (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/interceptor_test/Makefile (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/interceptor_test/interceptor_test.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/interceptor_test/interceptor_test.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/.gitignore (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/IncrementalRebalanceCli.java (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/Makefile (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/Murmur2Cli.java (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/TransactionProducerCli.java (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/java/run-class.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/librdkafka.suppressions (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/lz4_manual_test.sh (100%) rename lib/{librdkafka-2.3.0 => 
librdkafka-2.4.0}/tests/multi-broker-version-test.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/parse-refcnt.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/performance_plot.py (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/plugin_test/Makefile (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/plugin_test/plugin_test.c (100%) create mode 100644 lib/librdkafka-2.4.0/tests/requirements.txt rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/run-consumer-tests.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/run-producer-tests.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/run-test.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/rusage.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/sasl_test.py (89%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/scenarios/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/scenarios/ak23.json (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/scenarios/default.json (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/scenarios/noautocreate.json (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/sockem.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/sockem.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/sockem_ctrl.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/sockem_ctrl.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/test.c (98%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/test.conf.example (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/test.h (96%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/testcpp.cpp (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/testcpp.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/testshared.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/tools/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/tools/stats/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/tools/stats/filter.jq (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/tools/stats/graph.py (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/tools/stats/requirements.txt (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/tools/stats/to_csv.py (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/until-fail.sh (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/xxxx-assign_partition.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/tests/xxxx-metadata.cpp (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/vcpkg.json (95%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/.gitignore (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/README.md (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/build-package.bat (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/build.bat (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/common.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/install-openssl.ps1 (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/interceptor_test/interceptor_test.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/librdkafka.autopkg.template (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/librdkafka.master.testing.targets (100%) rename lib/{librdkafka-2.3.0 => 
librdkafka-2.4.0}/win32/librdkafka.sln (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/librdkafka.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/librdkafkacpp/librdkafkacpp.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/msbuild.ps1 (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/openssl_engine_example/openssl_engine_example.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/package-zip.ps1 (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/packages/repositories.config (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/push-package.bat (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/rdkafka_example/rdkafka_example.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/rdkafka_performance/rdkafka_performance.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/setup-msys2.ps1 (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/setup-vcpkg.ps1 (59%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/tests/.gitignore (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/tests/test.conf.example (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/tests/tests.vcxproj (99%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/wingetopt.c (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/wingetopt.h (100%) rename lib/{librdkafka-2.3.0 => librdkafka-2.4.0}/win32/wintime.h (100%) diff --git a/cmake/libraries.cmake b/cmake/libraries.cmake index aaa1e293db3..147391c7a5b 100644 --- a/cmake/libraries.cmake +++ b/cmake/libraries.cmake @@ -21,6 +21,6 @@ set(FLB_PATH_LIB_MINIZ "lib/miniz") set(FLB_PATH_LIB_TUTF8E "lib/tutf8e") set(FLB_PATH_LIB_CARES "lib/c-ares-1.31.0") set(FLB_PATH_LIB_SNAPPY "lib/snappy-fef67ac") -set(FLB_PATH_LIB_RDKAFKA "lib/librdkafka-2.3.0") +set(FLB_PATH_LIB_RDKAFKA "lib/librdkafka-2.4.0") set(FLB_PATH_LIB_RING_BUFFER "lib/lwrb") set(FLB_PATH_LIB_WASM_MICRO_RUNTIME "lib/wasm-micro-runtime-WAMR-1.3.0") diff --git a/lib/librdkafka-2.3.0/tests/interactive_broker_version.py b/lib/librdkafka-2.3.0/tests/interactive_broker_version.py deleted file mode 100755 index d294b7a61c3..00000000000 --- a/lib/librdkafka-2.3.0/tests/interactive_broker_version.py +++ /dev/null @@ -1,384 +0,0 @@ -#!/usr/bin/env python3 -# -# -# Run librdkafka regression tests on different supported broker versions. 
-# -# Requires: -# trivup python module -# gradle in your PATH - -from trivup.trivup import Cluster -from trivup.apps.ZookeeperApp import ZookeeperApp -from trivup.apps.KafkaBrokerApp import KafkaBrokerApp -from trivup.apps.KerberosKdcApp import KerberosKdcApp -from trivup.apps.SslApp import SslApp -from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp - -from cluster_testing import read_scenario_conf - -import subprocess -import tempfile -import os -import sys -import argparse -import json - - -def version_as_number(version): - if version == 'trunk': - return sys.maxsize - tokens = version.split('.') - return float('%s.%s' % (tokens[0], tokens[1])) - - -def test_version(version, cmd=None, deploy=True, conf={}, debug=False, - exec_cnt=1, - root_path='tmp', broker_cnt=3, scenario='default'): - """ - @brief Create, deploy and start a Kafka cluster using Kafka \\p version - Then run librdkafka's regression tests. - """ - - print('## Test version %s' % version) - - cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug) - - if conf.get('sasl_oauthbearer_method') == 'OIDC': - oidc = OauthbearerOIDCApp(cluster) - - # Enable SSL if desired - if 'SSL' in conf.get('security.protocol', ''): - cluster.ssl = SslApp(cluster, conf) - - # One ZK (from Kafka repo) - zk1 = ZookeeperApp(cluster) - zk_address = zk1.get('address') - - # Start Kerberos KDC if GSSAPI is configured - if 'GSSAPI' in args.conf.get('sasl_mechanisms', []): - KerberosKdcApp(cluster, 'MYREALM').start() - - defconf = {'version': version} - defconf.update(conf) - - print('conf: ', defconf) - - brokers = [] - for n in range(0, broker_cnt): - # Configure rack & replica selector if broker supports - # fetch-from-follower - if version_as_number(version) >= 2.4: - curr_conf = defconf.get('conf', list()) - defconf.update( - { - 'conf': [ - 'broker.rack=RACK${appid}', - 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector'] + curr_conf}) # noqa: E501 - print('conf broker', str(n), ': ', defconf) - brokers.append(KafkaBrokerApp(cluster, defconf)) - - cmd_env = os.environ.copy() - - # Generate test config file - security_protocol = 'PLAINTEXT' - fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True) - os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii')) - os.write(fd, 'broker.address.family=v4\n'.encode('ascii')) - if version.startswith('0.9') or version.startswith('0.8'): - os.write(fd, 'api.version.request=false\n'.encode('ascii')) - os.write( - fd, ('broker.version.fallback=%s\n' % - version).encode('ascii')) - # SASL (only one mechanism supported) - mech = defconf.get('sasl_mechanisms', '').split(',')[0] - if mech != '': - os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii')) - if mech == 'PLAIN' or mech.find('SCRAM') != -1: - print( - '# Writing SASL %s client config to %s' % - (mech, test_conf_file)) - security_protocol = 'SASL_PLAINTEXT' - # Use first user as SASL user/pass - for up in defconf.get('sasl_users', '').split(','): - u, p = up.split('=') - os.write(fd, ('sasl.username=%s\n' % u).encode('ascii')) - os.write(fd, ('sasl.password=%s\n' % p).encode('ascii')) - break - elif mech == 'OAUTHBEARER': - security_protocol = 'SASL_PLAINTEXT' - if defconf.get('sasl_oauthbearer_method') == 'OIDC': - os.write( - fd, ('sasl.oauthbearer.method=OIDC\n'.encode( - 'ascii'))) - os.write( - fd, ('sasl.oauthbearer.client.id=123\n'.encode( - 'ascii'))) - os.write( - fd, ('sasl.oauthbearer.client.secret=abc\n'.encode( - 'ascii'))) - os.write( - fd, 
('sasl.oauthbearer.extensions=\ - ExtensionworkloadIdentity=develC348S,\ - Extensioncluster=lkc123\n'.encode( - 'ascii'))) - os.write( - fd, ('sasl.oauthbearer.scope=test\n'.encode( - 'ascii'))) - cmd_env['VALID_OIDC_URL'] = oidc.conf.get('valid_url') - cmd_env['INVALID_OIDC_URL'] = oidc.conf.get('badformat_url') - cmd_env['EXPIRED_TOKEN_OIDC_URL'] = oidc.conf.get( - 'expired_url') - - else: - os.write( - fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode( - 'ascii'))) - os.write(fd, ('sasl.oauthbearer.config=%s\n' % - 'scope=requiredScope principal=admin').encode( - 'ascii')) - else: - print( - '# FIXME: SASL %s client config not written to %s' % - (mech, test_conf_file)) - - # SSL support - ssl = getattr(cluster, 'ssl', None) - if ssl is not None: - if 'SASL' in security_protocol: - security_protocol = 'SASL_SSL' - else: - security_protocol = 'SSL' - - key = ssl.create_cert('librdkafka') - - os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii')) - os.write(fd, ('ssl.certificate.location=%s\n' % - key['pub']['pem']).encode('ascii')) - os.write( - fd, ('ssl.key.location=%s\n' % - key['priv']['pem']).encode('ascii')) - os.write( - fd, ('ssl.key.password=%s\n' % - key['password']).encode('ascii')) - - for k, v in ssl.ca.items(): - cmd_env['SSL_ca_{}'.format(k)] = v - - # Set envs for all generated keys so tests can find them. - for k, v in key.items(): - if isinstance(v, dict): - for k2, v2 in v.items(): - # E.g. "SSL_priv_der=path/to/librdkafka-priv.der" - cmd_env['SSL_{}_{}'.format(k, k2)] = v2 - else: - cmd_env['SSL_{}'.format(k)] = v - - # Define bootstrap brokers based on selected security protocol - print('# Using client security.protocol=%s' % security_protocol) - all_listeners = ( - ','.join( - cluster.get_all( - 'listeners', - '', - KafkaBrokerApp))).split(',') - bootstrap_servers = ','.join( - [x for x in all_listeners if x.startswith(security_protocol)]) - os.write(fd, ('bootstrap.servers=%s\n' % - bootstrap_servers).encode('ascii')) - os.write(fd, ('security.protocol=%s\n' % - security_protocol).encode('ascii')) - os.close(fd) - - if deploy: - print('# Deploying cluster') - cluster.deploy() - else: - print('# Not deploying') - - print('# Starting cluster, instance path %s' % cluster.instance_path()) - cluster.start() - - print('# Waiting for brokers to come up') - - if not cluster.wait_operational(30): - cluster.stop(force=True) - raise Exception('Cluster %s did not go operational, see logs in %s/%s' % # noqa: E501 - (cluster.name, cluster.root_path, cluster.instance)) - - print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers) - - cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir') - cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file - cmd_env['ZK_ADDRESS'] = zk_address - cmd_env['BROKERS'] = bootstrap_servers - cmd_env['TEST_KAFKA_VERSION'] = version - cmd_env['TRIVUP_ROOT'] = cluster.instance_path() - cmd_env['TEST_SCENARIO'] = scenario - - # Provide a HTTPS REST endpoint for the HTTP client tests. - cmd_env['RD_UT_HTTP_URL'] = 'https://jsonplaceholder.typicode.com/users' - - # Per broker env vars - for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]: - cmd_env['BROKER_ADDRESS_%d' % b.appid] = \ - ','.join([x for x in b.conf['listeners'].split( - ',') if x.startswith(security_protocol)]) - # Add each broker pid as an env so they can be killed indivdidually. 
- cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid) - # JMX port, if available - jmx_port = b.conf.get('jmx_port', None) - if jmx_port is not None: - cmd_env['BROKER_JMX_PORT_%d' % b.appid] = str(jmx_port) - - if not cmd: - cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\\w$ ' % ( - cluster.name, version) - cmd = 'bash --rcfile <(cat ~/.bashrc)' - - ret = True - - for i in range(0, exec_cnt): - retcode = subprocess.call( - cmd, - env=cmd_env, - shell=True, - executable='/bin/bash') - if retcode != 0: - print('# Command failed with returncode %d: %s' % (retcode, cmd)) - ret = False - - try: - os.remove(test_conf_file) - except BaseException: - pass - - cluster.stop(force=True) - - cluster.cleanup(keeptypes=['log']) - return ret - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser( - description='Start a Kafka cluster and provide an interactive shell') - - parser.add_argument('versions', type=str, default=None, nargs='+', - help='Kafka version(s) to deploy') - parser.add_argument('--no-deploy', action='store_false', dest='deploy', - default=True, - help='Dont deploy applications, ' - 'assume already deployed.') - parser.add_argument('--conf', type=str, dest='conf', default=None, - help=''' - JSON config object (not file). - This does not translate to broker configs directly. - If broker config properties are to be specified, - they should be specified with - --conf \'{"conf": ["key=value", "key=value"]}\'''') - parser.add_argument('--scenario', type=str, dest='scenario', - default='default', - help='Test scenario (see scenarios/ directory)') - parser.add_argument('-c', type=str, dest='cmd', default=None, - help='Command to execute instead of shell') - parser.add_argument('-n', type=int, dest='exec_cnt', default=1, - help='Number of times to execute -c ..') - parser.add_argument('--debug', action='store_true', dest='debug', - default=False, - help='Enable trivup debugging') - parser.add_argument( - '--root', - type=str, - default=os.environ.get( - 'TRIVUP_ROOT', - 'tmp'), - help='Root working directory') - parser.add_argument( - '--port', - default=None, - help='Base TCP port to start allocating from') - parser.add_argument( - '--kafka-src', - dest='kafka_path', - type=str, - default=None, - help='Path to Kafka git repo checkout (used for version=trunk)') - parser.add_argument( - '--brokers', - dest='broker_cnt', - type=int, - default=3, - help='Number of Kafka brokers') - parser.add_argument('--ssl', dest='ssl', action='store_true', - default=False, - help='Enable SSL endpoints') - parser.add_argument( - '--sasl', - dest='sasl', - type=str, - default=None, - help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)') - parser.add_argument( - '--oauthbearer-method', - dest='sasl_oauthbearer_method', - type=str, - default=None, - help='OAUTHBEARER/OIDC method (DEFAULT, OIDC), \ - must config SASL mechanism to OAUTHBEARER') - parser.add_argument( - '--max-reauth-ms', - dest='reauth_ms', - type=int, - default='10000', - help=''' - Sets the value of connections.max.reauth.ms on the brokers. 
- Set 0 to disable.''') - - args = parser.parse_args() - if args.conf is not None: - args.conf = json.loads(args.conf) - else: - args.conf = {} - - args.conf.update(read_scenario_conf(args.scenario)) - - if args.port is not None: - args.conf['port_base'] = int(args.port) - if args.kafka_path is not None: - args.conf['kafka_path'] = args.kafka_path - if args.ssl: - args.conf['security.protocol'] = 'SSL' - if args.sasl: - if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM') - != -1) and 'sasl_users' not in args.conf: - args.conf['sasl_users'] = 'testuser=testpass' - args.conf['sasl_mechanisms'] = args.sasl - retcode = 0 - if args.sasl_oauthbearer_method: - if args.sasl_oauthbearer_method == "OIDC" and \ - args.conf['sasl_mechanisms'] != 'OAUTHBEARER': - print('If config `--oauthbearer-method=OIDC`, ' - '`--sasl` must be set to `OAUTHBEARER`') - retcode = 3 - sys.exit(retcode) - args.conf['sasl_oauthbearer_method'] = \ - args.sasl_oauthbearer_method - - if 'conf' not in args.conf: - args.conf['conf'] = [] - - args.conf['conf'].append( - "connections.max.reauth.ms={}".format( - args.reauth_ms)) - args.conf['conf'].append("log.retention.bytes=1000000000") - - for version in args.versions: - r = test_version(version, cmd=args.cmd, deploy=args.deploy, - conf=args.conf, debug=args.debug, - exec_cnt=args.exec_cnt, - root_path=args.root, broker_cnt=args.broker_cnt, - scenario=args.scenario) - if not r: - retcode = 2 - - sys.exit(retcode) diff --git a/lib/librdkafka-2.3.0/tests/requirements.txt b/lib/librdkafka-2.3.0/tests/requirements.txt deleted file mode 100644 index c15a66f47ea..00000000000 --- a/lib/librdkafka-2.3.0/tests/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -trivup >= 0.12.1 -jsoncomment diff --git a/lib/librdkafka-2.3.0/.clang-format-cpp b/lib/librdkafka-2.4.0/.clang-format-cpp similarity index 100% rename from lib/librdkafka-2.3.0/.clang-format-cpp rename to lib/librdkafka-2.4.0/.clang-format-cpp diff --git a/lib/librdkafka-2.3.0/.dir-locals.el b/lib/librdkafka-2.4.0/.dir-locals.el similarity index 100% rename from lib/librdkafka-2.3.0/.dir-locals.el rename to lib/librdkafka-2.4.0/.dir-locals.el diff --git a/lib/librdkafka-2.3.0/.formatignore b/lib/librdkafka-2.4.0/.formatignore similarity index 100% rename from lib/librdkafka-2.3.0/.formatignore rename to lib/librdkafka-2.4.0/.formatignore diff --git a/lib/librdkafka-2.3.0/.gdbmacros b/lib/librdkafka-2.4.0/.gdbmacros similarity index 100% rename from lib/librdkafka-2.3.0/.gdbmacros rename to lib/librdkafka-2.4.0/.gdbmacros diff --git a/lib/librdkafka-2.3.0/.github/ISSUE_TEMPLATE b/lib/librdkafka-2.4.0/.github/ISSUE_TEMPLATE similarity index 100% rename from lib/librdkafka-2.3.0/.github/ISSUE_TEMPLATE rename to lib/librdkafka-2.4.0/.github/ISSUE_TEMPLATE diff --git a/lib/librdkafka-2.3.0/.gitignore b/lib/librdkafka-2.4.0/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/.gitignore rename to lib/librdkafka-2.4.0/.gitignore diff --git a/lib/librdkafka-2.3.0/.semaphore/project.yml b/lib/librdkafka-2.4.0/.semaphore/project.yml similarity index 100% rename from lib/librdkafka-2.3.0/.semaphore/project.yml rename to lib/librdkafka-2.4.0/.semaphore/project.yml diff --git a/lib/librdkafka-2.3.0/.semaphore/project_public.yml b/lib/librdkafka-2.4.0/.semaphore/project_public.yml similarity index 100% rename from lib/librdkafka-2.3.0/.semaphore/project_public.yml rename to lib/librdkafka-2.4.0/.semaphore/project_public.yml diff --git a/lib/librdkafka-2.3.0/.semaphore/semaphore.yml b/lib/librdkafka-2.4.0/.semaphore/semaphore.yml 
similarity index 96% rename from lib/librdkafka-2.3.0/.semaphore/semaphore.yml rename to lib/librdkafka-2.4.0/.semaphore/semaphore.yml index f58bcc23e1a..a08a8715448 100644 --- a/lib/librdkafka-2.3.0/.semaphore/semaphore.yml +++ b/lib/librdkafka-2.4.0/.semaphore/semaphore.yml @@ -2,7 +2,7 @@ version: v1.0 name: 'librdkafka build and release artifact pipeline' agent: machine: - type: s1-prod-macos-arm64 + type: s1-prod-macos-13-5-arm64 execution_time_limit: hours: 3 global_job_config: @@ -17,7 +17,7 @@ blocks: task: agent: machine: - type: s1-prod-macos-arm64 + type: s1-prod-macos-13-5-arm64 env_vars: - name: ARTIFACT_KEY value: p-librdkafka__plat-osx__arch-arm64__lnk-all @@ -43,7 +43,7 @@ blocks: task: agent: machine: - type: s1-prod-macos + type: s1-prod-macos-13-5-amd64 env_vars: - name: ARTIFACT_KEY value: p-librdkafka__plat-osx__arch-x64__lnk-all @@ -112,7 +112,7 @@ blocks: value: -std=gnu90 # Test minimum C standard, default in CentOS 7 prologue: commands: - docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY + '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' jobs: - name: 'Build and integration tests' commands: @@ -120,7 +120,7 @@ - sudo dpkg -i rapidjson-dev.deb - python3 -m pip install -U pip - python3 -m pip -V - - python3 -m pip install -r tests/requirements.txt + - (cd tests && python3 -m pip install -r requirements.txt) - ./configure --install-deps # split these up - ./packaging/tools/rdutcoverage.sh @@ -147,7 +147,7 @@ blocks: type: s1-prod-ubuntu20-04-amd64-2 prologue: commands: - - docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' epilogue: commands: - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' @@ -203,7 +203,7 @@ blocks: type: s1-prod-ubuntu20-04-arm64-1 prologue: commands: - - docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' epilogue: commands: - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' diff --git a/lib/librdkafka-2.3.0/CHANGELOG.md b/lib/librdkafka-2.4.0/CHANGELOG.md similarity index 92% rename from lib/librdkafka-2.3.0/CHANGELOG.md rename to lib/librdkafka-2.4.0/CHANGELOG.md index ea7206ceacc..cbdf0fbc9fd 100644 --- a/lib/librdkafka-2.3.0/CHANGELOG.md +++ b/lib/librdkafka-2.4.0/CHANGELOG.md @@ -1,3 +1,111 @@ +# librdkafka v2.4.0 + +librdkafka v2.4.0 is a feature release: + + * [KIP-848](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol): The Next Generation of the Consumer Rebalance Protocol. + **Early Access**: This should be used only for evaluation and must not be used in production. Features and the contract of this KIP might change in the future (#4610). + * [KIP-467](https://cwiki.apache.org/confluence/display/KAFKA/KIP-467%3A+Augment+ProduceResponse+error+messaging+for+specific+culprit+records): Augment ProduceResponse error messaging for specific culprit records (#4583).
+ * [KIP-516](https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers) + Continue the partial implementation by adding a metadata cache by topic id + and updating the topic id corresponding to the partition name (#4676). + * Upgrade OpenSSL to v3.0.12 (while building from source) with various security fixes; + check the [release notes](https://www.openssl.org/news/cl30.txt). + * Integration tests can be started in KRaft mode and run against any + GitHub Kafka branch other than the released versions. + * Fix pipeline inclusion of static binaries (#4666). + * Fix to the main loop timeout calculation leading to a tight loop for a + max period of 1 ms (#4671). + * Fixed a bug causing duplicate message consumption from a stale + fetch start offset in some particular cases (#4636). + * Fix to metadata cache expiration on full metadata refresh (#4677). + * Fix for a wrong error returned on full metadata refresh before joining + a consumer group (#4678). + * Fix to metadata refresh interruption (#4679). + * Fix for an undesired partition migration with a stale leader epoch (#4680). + * Fix hang in cooperative consumer mode if an assignment is processed + while closing the consumer (#4528). + + +## Upgrade considerations + + * With KIP-467, INVALID_MSG (Java: CorruptRecordException) will + be retried automatically. INVALID_RECORD (Java: InvalidRecordException), instead, + is not retriable and will be set only on the records that caused the + error. The rest of the records in the batch will fail with the new error code + _INVALID_DIFFERENT_RECORD (Java: KafkaException) and can be retried manually, + depending on the application logic (#4583); see the delivery report sketch below.
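+
+   As an illustrative sketch only (not upstream documentation), a delivery
+   report callback could distinguish the two cases like this; the error-code
+   names assume the v2.4.0 C enums, and the retry policy is application
+   specific:
+
+   ```c
+   #include <librdkafka/rdkafka.h>
+
+   /* Illustrative delivery report callback, registered with
+    * rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb). */
+   static void dr_msg_cb(rd_kafka_t *rk,
+                         const rd_kafka_message_t *rkm,
+                         void *opaque) {
+           switch (rkm->err) {
+           case RD_KAFKA_RESP_ERR_NO_ERROR:
+                   break; /* Message delivered. */
+           case RD_KAFKA_RESP_ERR_INVALID_RECORD:
+                   /* This record caused the batch error: not retriable. */
+                   break;
+           case RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD:
+                   /* Failed because of another record in the same batch:
+                    * the application may decide to produce it again. */
+                   break;
+           default:
+                   /* Other errors: see rd_kafka_message_errstr(rkm). */
+                   break;
+           }
+   }
+   ```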
+ + +## Early Access + +### [KIP-848](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol): The Next Generation of the Consumer Rebalance Protocol + * With this new protocol the role of the Group Leader (a member) is removed and + the assignment is calculated by the Group Coordinator (a broker) and sent + to each member through heartbeats. + + The feature is still _not production-ready_. + It's possible to try it in a non-production environment. + + A [guide](INTRODUCTION.md#next-generation-of-the-consumer-group-protocol-kip-848) is available + with considerations and steps to follow to test it (#4610). + + +## Fixes + +### General fixes + + * Issues: [confluentinc/confluent-kafka-go#981](https://github.com/confluentinc/confluent-kafka-go/issues/981). + In the librdkafka release pipeline, a static build containing libsasl2 + could be chosen instead of the alternative one without it. + That caused the libsasl2 dependency to be required in confluent-kafka-go + v2.1.0-linux-musl-arm64 and v2.3.0-linux-musl-arm64. + Solved by correctly excluding the binary configured with that library + when targeting a static build. + Happening since v2.0.2, with specified platforms, + when using static binaries (#4666). + * Issues: #4684. + When the main thread loop was awakened less than 1 ms + before the expiration of a timeout, the loop ran with a zero timeout, + leading to increased CPU usage until the timeout was reached. + Happening since 1.x. + * Issues: #4685. + The metadata cache was cleared on full metadata refresh, leading to unnecessary + refreshes and occasional `UNKNOWN_TOPIC_OR_PART` errors. Solved by updating the + cache for existing or hinted entries instead of clearing them. + Happening since 2.1.0 (#4677). + * Issues: #4589. + A metadata call made before the member joined the consumer group + could lead to an `UNKNOWN_TOPIC_OR_PART` error. Solved by updating + the consumer group following a metadata refresh only in safe states. + Happening since 2.1.0 (#4678). + * Issues: #4577. + Metadata refreshes without a partition leader change could lead to a loop of + metadata calls at fixed intervals. Solved by stopping the metadata refresh when + all existing metadata is non-stale. Happening since 2.3.0 (#4679). + * Issues: #4687. + A partition migration could happen, using stale metadata, when the partition + was undergoing validation and being retried because of an error. + Solved by doing a partition migration only with a non-stale leader epoch. + Happening since 2.1.0 (#4680). + +### Consumer fixes + + * Issues: #4686. + In case of a subscription change, a consumer using the cooperative assignor + could resume fetching from a previous position. + That could also happen when resuming a partition that wasn't paused. + Fixed by ensuring that a resume operation is completely a no-op when + the partition isn't paused. + Happening since 1.x (#4636). + * Issues: #4527. + While using the cooperative assignor, if an assignment is received while closing the consumer, + it's possible that it gets stuck in state WAIT_ASSIGN_CALL while the call is converted to + a full unassign. Solved by changing the state from WAIT_ASSIGN_CALL to WAIT_UNASSIGN_CALL + while doing this conversion. + Happening since 1.x (#4528). + + + # librdkafka v2.3.0 librdkafka v2.3.0 is a feature release: diff --git a/lib/librdkafka-2.3.0/CMakeLists.txt b/lib/librdkafka-2.4.0/CMakeLists.txt similarity index 100% rename from lib/librdkafka-2.3.0/CMakeLists.txt rename to lib/librdkafka-2.4.0/CMakeLists.txt diff --git a/lib/librdkafka-2.3.0/CODE_OF_CONDUCT.md b/lib/librdkafka-2.4.0/CODE_OF_CONDUCT.md similarity index 100% rename from lib/librdkafka-2.3.0/CODE_OF_CONDUCT.md rename to lib/librdkafka-2.4.0/CODE_OF_CONDUCT.md diff --git a/lib/librdkafka-2.3.0/CONFIGURATION.md b/lib/librdkafka-2.4.0/CONFIGURATION.md similarity index 98% rename from lib/librdkafka-2.3.0/CONFIGURATION.md rename to lib/librdkafka-2.4.0/CONFIGURATION.md index 4a75378b539..ae01d16ddbb 100644 --- a/lib/librdkafka-2.3.0/CONFIGURATION.md +++ b/lib/librdkafka-2.4.0/CONFIGURATION.md @@ -109,7 +109,9 @@ group.instance.id | C | | partition.assignment.strategy | C | | range,roundrobin | medium | The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.
*Type: string* session.timeout.ms | C | 1 .. 3600000 | 45000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
*Type: integer* heartbeat.interval.ms | C | 1 .. 3600000 | 3000 | low | Group session keepalive heartbeat interval.
*Type: integer* -group.protocol.type | C | | consumer | low | Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`.
*Type: string* +group.protocol.type | C | | consumer | low | Group protocol type for the `classic` group protocol. NOTE: Currently, the only supported group protocol type is `consumer`.
*Type: string* +group.protocol | C | classic, consumer | classic | high | Group protocol to use. Use `classic` for the original protocol and `consumer` for the new protocol introduced in KIP-848. Available protocols: classic or consumer. Default is `classic`, but it will change to `consumer` in a future release.
*Type: enum value* +group.remote.assignor | C | | | medium | Server-side assignor to use. Keep it null to let the server select a suitable assignor for the group. Available assignors: uniform or range. Default is null.
*Type: string* coordinator.query.interval.ms | C | 1 .. 3600000 | 600000 | low | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
*Type: integer* max.poll.interval.ms | C | 1 .. 86400000 | 300000 | high | Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may not be possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-running processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed before processing has finished. The interval is checked two times per second. See KIP-62 for more information.
*Type: integer* enable.auto.commit | C | true, false | true | high | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
*Type: boolean* diff --git a/lib/librdkafka-2.3.0/CONTRIBUTING.md b/lib/librdkafka-2.4.0/CONTRIBUTING.md similarity index 100% rename from lib/librdkafka-2.3.0/CONTRIBUTING.md rename to lib/librdkafka-2.4.0/CONTRIBUTING.md diff --git a/lib/librdkafka-2.3.0/Doxyfile b/lib/librdkafka-2.4.0/Doxyfile similarity index 100% rename from lib/librdkafka-2.3.0/Doxyfile rename to lib/librdkafka-2.4.0/Doxyfile diff --git a/lib/librdkafka-2.3.0/INTRODUCTION.md b/lib/librdkafka-2.4.0/INTRODUCTION.md similarity index 92% rename from lib/librdkafka-2.3.0/INTRODUCTION.md rename to lib/librdkafka-2.4.0/INTRODUCTION.md index b0e2bd38b08..1cefbc5aaa2 100644 --- a/lib/librdkafka-2.3.0/INTRODUCTION.md +++ b/lib/librdkafka-2.4.0/INTRODUCTION.md @@ -72,6 +72,7 @@ librdkafka also provides a native C++ interface. - [Auto offset reset](#auto-offset-reset) - [Consumer groups](#consumer-groups) - [Static consumer groups](#static-consumer-groups) + - [Next generation of the consumer group protocol](#next-generation-of-the-consumer-group-protocol-kip-848) - [Topics](#topics) - [Unknown or unauthorized topics](#unknown-or-unauthorized-topics) - [Topic metadata propagation for newly created topics](#topic-metadata-propagation-for-newly-created-topics) @@ -1540,6 +1541,98 @@ the original fatal error code and reason. To read more about static group membership, see [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances). +### Next generation of the consumer group protocol: [KIP 848](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol) + +Starting from librdkafka 2.4.0, the next generation consumer group rebalance protocol +defined in KIP-848 is introduced. + +**Warning** +It's still in **Early Access**, which means it's _not production-ready_: +it's still under validation and lacks some needed features. +Features and their contract might change in the future. + +With this protocol the role of the Group Leader (a member) is removed and +the assignment is calculated by the Group Coordinator (a broker) and sent +to each member through heartbeats. + +To test it, a Kafka cluster must be set up in KRaft mode and the new group +protocol enabled with the `group.coordinator.rebalance.protocols` property. +The broker version must be Apache Kafka 3.7.0 or newer. See the Apache Kafka +[Release Notes](https://cwiki.apache.org/confluence/display/KAFKA/The+Next+Generation+of+the+Consumer+Rebalance+Protocol+%28KIP-848%29+-+Early+Access+Release+Notes). + +On the client side, it can be enabled by setting the new property `group.protocol=consumer`. +A second property, `group.remote.assignor`, is added to choose the desired +remote assignor (see the configuration sketch at the end of this section). + +**Available features** + +- Subscription to one or more topics +- Rebalance callbacks (see contract changes) +- Static group membership +- Configure remote assignor +- Max poll interval is enforced +- Offline upgrade from an empty consumer group with committed offsets + +**Future features** + +- Regular expression support when subscribing +- AdminClient changes as described in the KIP + +**Contract changes** + +Along with the new feature come some needed contract changes, +so the protocol will be enabled by default only with a librdkafka major release.
+ + - Deprecated client configurations with the new protocol: + - `partition.assignment.strategy`, replaced by `group.remote.assignor` + - `session.timeout.ms`, replaced by the broker configuration `group.consumer.session.timeout.ms` + - `heartbeat.interval.ms`, replaced by the broker configuration `group.consumer.heartbeat.interval.ms` + - `group.protocol.type`, which is not used in the new protocol + + - The protocol rebalance is fully incremental, so the only allowed functions to + use in a rebalance callback will be `rd_kafka_incremental_assign` and + `rd_kafka_incremental_unassign`. Currently you can still use existing code, + and the expected function to call is determined based on the chosen + `partition.assignment.strategy`, but this fallback will be removed in the next + release. + + When setting the `group.remote.assignor` property, it's already + required to use the incremental assign and unassign functions. + All assignors are sticky with the new protocol, including _range_, + which previously wasn't. + + - With static group membership, if two members are using the same + `group.instance.id`, the one that joins the consumer group later will be + fenced with the fatal `UNRELEASED_INSTANCE_ID` error. Previously, it was the existing + member that was fenced. This was changed to avoid two members contending for the + same id. It also means that an instance that crashes won't be automatically + replaced by a new instance until the session times out, so it's especially important + to check that consumers are being closed properly on shutdown. Ensuring that + no two instances with the same `group.instance.id` are running at any time + is also important. + + - The session timeout is enforced remotely only: if the Coordinator isn't reachable + by a member, that member will continue to fetch messages, even though it won't be able to + commit them. Otherwise, the member will be fenced as soon as it receives a + heartbeat response from the Coordinator. + With the `classic` protocol, instead, the member stops fetching when the session timeout + expires on the client. + + For the same reason, when closing or unsubscribing with auto-commit set, + the member will try to commit until a specific timeout has passed. + Currently the timeout is the same as in the `classic` protocol, corresponding + to `session.timeout.ms`, but it will change before the feature + reaches a stable state. + + - An `UNKNOWN_TOPIC_OR_PART` error isn't received anymore when a consumer + subscribes to a topic that doesn't exist in the local cache, as the consumer + is still subscribed to the topic and it could be created just afterwards. + + - A consumer won't make a preliminary Metadata call that can return a + `TOPIC_AUTHORIZATION_FAILED` error, as happens with the `classic` group protocol. + Topic partitions will still be assigned to the member + by the Coordinator only if it's authorized to consume from the topic.
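+
+ As a minimal illustration (a sketch, not upstream documentation), a consumer
+ opting in to the new protocol from C differs from a classic one only in the
+ two properties described above; the broker address, group id, topic name and
+ chosen assignor below are placeholders, and error handling is trimmed:
+
+ ```c
+ #include <librdkafka/rdkafka.h>
+
+ int main(void) {
+         char errstr[512];
+         rd_kafka_conf_t *conf = rd_kafka_conf_new();
+
+         rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
+                           errstr, sizeof(errstr));
+         rd_kafka_conf_set(conf, "group.id", "example-group",
+                           errstr, sizeof(errstr));
+         /* Opt in to the KIP-848 protocol. */
+         rd_kafka_conf_set(conf, "group.protocol", "consumer",
+                           errstr, sizeof(errstr));
+         /* Optional: leave unset to let the broker pick the assignor. */
+         rd_kafka_conf_set(conf, "group.remote.assignor", "uniform",
+                           errstr, sizeof(errstr));
+
+         rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
+                                       errstr, sizeof(errstr));
+         if (!rk)
+                 return 1;
+         rd_kafka_poll_set_consumer(rk);
+
+         rd_kafka_topic_partition_list_t *topics =
+             rd_kafka_topic_partition_list_new(1);
+         rd_kafka_topic_partition_list_add(topics, "example-topic",
+                                           RD_KAFKA_PARTITION_UA);
+         rd_kafka_subscribe(rk, topics);
+         rd_kafka_topic_partition_list_destroy(topics);
+
+         /* Poll with rd_kafka_consumer_poll() as usual: the assignment
+          * now arrives from the Group Coordinator through heartbeats. */
+
+         rd_kafka_consumer_close(rk);
+         rd_kafka_destroy(rk);
+         return 0;
+ }
+ ```
+
+ The consume loop itself is unchanged; what changes is that any rebalance
+ callback must use the incremental assign/unassign functions, as described
+ in the contract changes above.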
+ + ### Note on Batch consume APIs Using multiple instances of `rd_kafka_consume_batch()` and/or `rd_kafka_consume_batch_queue()` @@ -1933,7 +2026,7 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-455 - AdminAPI: Replica assignment | 2.4.0 (WIP) | Not supported | | KIP-460 - AdminAPI: electPreferredLeader | 2.4.0 | Not supported | | KIP-464 - AdminAPI: defaults for createTopics | 2.4.0 | Supported | -| KIP-467 - Per-message (sort of) error codes in ProduceResponse | 2.4.0 (WIP) | Not supported | +| KIP-467 - Per-message (sort of) error codes in ProduceResponse | 2.4.0 | Supported | | KIP-480 - Sticky partitioner | 2.4.0 | Supported | | KIP-482 - Optional fields in Kafka protocol | 2.4.0 | Partially supported (ApiVersionRequest) | | KIP-496 - AdminAPI: delete offsets | 2.4.0 | Supported | @@ -1951,7 +2044,7 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-559 - Make the Kafka Protocol Friendlier with L7 Proxies | 2.5.0 | Not supported | | KIP-568 - Explicit rebalance triggering on the Consumer | 2.6.0 | Not supported | | KIP-659 - Add metadata to DescribeConfigsResponse | 2.6.0 | Not supported | -| KIP-580 - Exponential backoff for Kafka clients | 3.7.0 (WIP) | supported | +| KIP-580 - Exponential backoff for Kafka clients | 3.7.0 (WIP) | Supported | | KIP-584 - Versioning scheme for features | WIP | Not supported | | KIP-588 - Allow producers to recover gracefully from txn timeouts | 2.8.0 (WIP) | Not supported | | KIP-601 - Configurable socket connection timeout | 2.7.0 | Supported | @@ -1961,56 +2054,57 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-735 - Increase default consumer session timeout | 3.0.0 | Supported | | KIP-768 - SASL/OAUTHBEARER OIDC support | 3.0 | Supported | | KIP-881 - Rack-aware Partition Assignment for Kafka Consumers | 3.5.0 (WIP) | Supported | +| KIP-848 - The Next Generation of the Consumer Rebalance Protocol | 3.7.0 (EA) | Early Access | ### Supported protocol versions -"Kafka max" is the maximum ApiVersion supported in Apache Kafka 3.5.0, while +"Kafka max" is the maximum ApiVersion supported in Apache Kafka 3.7.0, while "librdkafka max" is the maximum ApiVersion supported in the latest release of librdkafka. 
-| ApiKey | Request name | Kafka max | librdkafka max | -| ------- | ------------------------------| ----------- | ----------------------- | -| 0 | Produce | 9 | 7 | -| 1 | Fetch | 15 | 11 | -| 2 | ListOffsets | 8 | 7 | -| 3 | Metadata | 12 | 12 | -| 8 | OffsetCommit | 8 | 7 | -| 9 | OffsetFetch | 8 | 7 | -| 10 | FindCoordinator | 4 | 2 | -| 11 | JoinGroup | 9 | 5 | -| 12 | Heartbeat | 4 | 3 | -| 13 | LeaveGroup | 5 | 1 | -| 14 | SyncGroup | 5 | 3 | -| 15 | DescribeGroups | 5 | 4 | -| 16 | ListGroups | 4 | 4 | -| 17 | SaslHandshake | 1 | 1 | -| 18 | ApiVersions | 3 | 3 | -| 19 | CreateTopics | 7 | 4 | -| 20 | DeleteTopics | 6 | 1 | -| 21 | DeleteRecords | 2 | 1 | -| 22 | InitProducerId | 4 | 4 | -| 23 | OffsetForLeaderEpoch | 4 | 2 | -| 24 | AddPartitionsToTxn | 4 | 0 | -| 25 | AddOffsetsToTxn | 3 | 0 | -| 26 | EndTxn | 3 | 1 | -| 28 | TxnOffsetCommit | 3 | 3 | -| 29 | DescribeAcls | 3 | 1 | -| 30 | CreateAcls | 3 | 1 | -| 31 | DeleteAcls | 3 | 1 | -| 32 | DescribeConfigs | 4 | 1 | -| 33 | AlterConfigs | 2 | 2 | -| 36 | SaslAuthenticate | 2 | 1 | -| 37 | CreatePartitions | 3 | 0 | -| 42 | DeleteGroups | 2 | 1 | -| 44 | IncrementalAlterConfigs | 1 | 1 | -| 47 | OffsetDelete | 0 | 0 | -| 50 | DescribeUserScramCredentials | 0 | 0 | -| 51 | AlterUserScramCredentials | 0 | 0 | - +| ApiKey | Request name | Kafka max | librdkafka max | +| ------- | ----------------------------- | ---------- | -------------- | +| 0 | Produce | 10 | 8 | +| 1 | Fetch | 16 | 11 | +| 2 | ListOffsets | 8 | 7 | +| 3 | Metadata | 12 | 12 | +| 8 | OffsetCommit | 9 | 9 | +| 9 | OffsetFetch | 9 | 9 | +| 10 | FindCoordinator | 4 | 2 | +| 11 | JoinGroup | 9 | 5 | +| 12 | Heartbeat | 4 | 3 | +| 13 | LeaveGroup | 5 | 1 | +| 14 | SyncGroup | 5 | 3 | +| 15 | DescribeGroups | 5 | 4 | +| 16 | ListGroups | 4 | 4 | +| 17 | SaslHandshake | 1 | 1 | +| 18 | ApiVersions | 3 | 3 | +| 19 | CreateTopics | 7 | 4 | +| 20 | DeleteTopics | 6 | 1 | +| 21 | DeleteRecords | 2 | 1 | +| 22 | InitProducerId | 4 | 4 | +| 23 | OffsetForLeaderEpoch | 4 | 2 | +| 24 | AddPartitionsToTxn | 4 | 0 | +| 25 | AddOffsetsToTxn | 3 | 0 | +| 26 | EndTxn | 3 | 1 | +| 28 | TxnOffsetCommit | 3 | 3 | +| 29 | DescribeAcls | 3 | 1 | +| 30 | CreateAcls | 3 | 1 | +| 31 | DeleteAcls | 3 | 1 | +| 32 | DescribeConfigs | 4 | 1 | +| 33 | AlterConfigs | 2 | 2 | +| 36 | SaslAuthenticate | 2 | 1 | +| 37 | CreatePartitions | 3 | 0 | +| 42 | DeleteGroups | 2 | 1 | +| 44 | IncrementalAlterConfigs | 1 | 1 | +| 47 | OffsetDelete | 0 | 0 | +| 50 | DescribeUserScramCredentials | 0 | 0 | +| 51 | AlterUserScramCredentials | 0 | 0 | +| 68 | ConsumerGroupHeartbeat | 0 | 0 | # Recommendations for language binding developers diff --git a/lib/librdkafka-2.3.0/LICENSE b/lib/librdkafka-2.4.0/LICENSE similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE rename to lib/librdkafka-2.4.0/LICENSE diff --git a/lib/librdkafka-2.3.0/LICENSE.cjson b/lib/librdkafka-2.4.0/LICENSE.cjson similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.cjson rename to lib/librdkafka-2.4.0/LICENSE.cjson diff --git a/lib/librdkafka-2.3.0/LICENSE.crc32c b/lib/librdkafka-2.4.0/LICENSE.crc32c similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.crc32c rename to lib/librdkafka-2.4.0/LICENSE.crc32c diff --git a/lib/librdkafka-2.3.0/LICENSE.fnv1a b/lib/librdkafka-2.4.0/LICENSE.fnv1a similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.fnv1a rename to lib/librdkafka-2.4.0/LICENSE.fnv1a diff --git a/lib/librdkafka-2.3.0/LICENSE.hdrhistogram b/lib/librdkafka-2.4.0/LICENSE.hdrhistogram 
similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.hdrhistogram rename to lib/librdkafka-2.4.0/LICENSE.hdrhistogram diff --git a/lib/librdkafka-2.3.0/LICENSE.lz4 b/lib/librdkafka-2.4.0/LICENSE.lz4 similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.lz4 rename to lib/librdkafka-2.4.0/LICENSE.lz4 diff --git a/lib/librdkafka-2.3.0/LICENSE.murmur2 b/lib/librdkafka-2.4.0/LICENSE.murmur2 similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.murmur2 rename to lib/librdkafka-2.4.0/LICENSE.murmur2 diff --git a/lib/librdkafka-2.3.0/LICENSE.pycrc b/lib/librdkafka-2.4.0/LICENSE.pycrc similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.pycrc rename to lib/librdkafka-2.4.0/LICENSE.pycrc diff --git a/lib/librdkafka-2.3.0/LICENSE.queue b/lib/librdkafka-2.4.0/LICENSE.queue similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.queue rename to lib/librdkafka-2.4.0/LICENSE.queue diff --git a/lib/librdkafka-2.3.0/LICENSE.regexp b/lib/librdkafka-2.4.0/LICENSE.regexp similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.regexp rename to lib/librdkafka-2.4.0/LICENSE.regexp diff --git a/lib/librdkafka-2.3.0/LICENSE.snappy b/lib/librdkafka-2.4.0/LICENSE.snappy similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.snappy rename to lib/librdkafka-2.4.0/LICENSE.snappy diff --git a/lib/librdkafka-2.3.0/LICENSE.tinycthread b/lib/librdkafka-2.4.0/LICENSE.tinycthread similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.tinycthread rename to lib/librdkafka-2.4.0/LICENSE.tinycthread diff --git a/lib/librdkafka-2.3.0/LICENSE.wingetopt b/lib/librdkafka-2.4.0/LICENSE.wingetopt similarity index 100% rename from lib/librdkafka-2.3.0/LICENSE.wingetopt rename to lib/librdkafka-2.4.0/LICENSE.wingetopt diff --git a/lib/librdkafka-2.3.0/LICENSES.txt b/lib/librdkafka-2.4.0/LICENSES.txt similarity index 100% rename from lib/librdkafka-2.3.0/LICENSES.txt rename to lib/librdkafka-2.4.0/LICENSES.txt diff --git a/lib/librdkafka-2.3.0/Makefile b/lib/librdkafka-2.4.0/Makefile similarity index 99% rename from lib/librdkafka-2.3.0/Makefile rename to lib/librdkafka-2.4.0/Makefile index 2d931f09ab7..d5e168b7834 100755 --- a/lib/librdkafka-2.3.0/Makefile +++ b/lib/librdkafka-2.4.0/Makefile @@ -40,7 +40,7 @@ file-check: CONFIGURATION.md LICENSES.txt examples check: file-check @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done) -install-subdirs: +install-subdirs: libs @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d install || exit $?; done) install: install-subdirs doc-install diff --git a/lib/librdkafka-2.3.0/README.md b/lib/librdkafka-2.4.0/README.md similarity index 100% rename from lib/librdkafka-2.3.0/README.md rename to lib/librdkafka-2.4.0/README.md diff --git a/lib/librdkafka-2.3.0/README.win32 b/lib/librdkafka-2.4.0/README.win32 similarity index 100% rename from lib/librdkafka-2.3.0/README.win32 rename to lib/librdkafka-2.4.0/README.win32 diff --git a/lib/librdkafka-2.3.0/STATISTICS.md b/lib/librdkafka-2.4.0/STATISTICS.md similarity index 100% rename from lib/librdkafka-2.3.0/STATISTICS.md rename to lib/librdkafka-2.4.0/STATISTICS.md diff --git a/lib/librdkafka-2.3.0/configure b/lib/librdkafka-2.4.0/configure similarity index 100% rename from lib/librdkafka-2.3.0/configure rename to lib/librdkafka-2.4.0/configure diff --git a/lib/librdkafka-2.3.0/configure.self b/lib/librdkafka-2.4.0/configure.self similarity index 100% rename from lib/librdkafka-2.3.0/configure.self rename to lib/librdkafka-2.4.0/configure.self diff --git 
a/lib/librdkafka-2.3.0/debian/.gitignore b/lib/librdkafka-2.4.0/debian/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/debian/.gitignore rename to lib/librdkafka-2.4.0/debian/.gitignore diff --git a/lib/librdkafka-2.3.0/debian/changelog b/lib/librdkafka-2.4.0/debian/changelog similarity index 100% rename from lib/librdkafka-2.3.0/debian/changelog rename to lib/librdkafka-2.4.0/debian/changelog diff --git a/lib/librdkafka-2.3.0/debian/compat b/lib/librdkafka-2.4.0/debian/compat similarity index 100% rename from lib/librdkafka-2.3.0/debian/compat rename to lib/librdkafka-2.4.0/debian/compat diff --git a/lib/librdkafka-2.3.0/debian/control b/lib/librdkafka-2.4.0/debian/control similarity index 100% rename from lib/librdkafka-2.3.0/debian/control rename to lib/librdkafka-2.4.0/debian/control diff --git a/lib/librdkafka-2.3.0/debian/copyright b/lib/librdkafka-2.4.0/debian/copyright similarity index 100% rename from lib/librdkafka-2.3.0/debian/copyright rename to lib/librdkafka-2.4.0/debian/copyright diff --git a/lib/librdkafka-2.3.0/debian/gbp.conf b/lib/librdkafka-2.4.0/debian/gbp.conf similarity index 100% rename from lib/librdkafka-2.3.0/debian/gbp.conf rename to lib/librdkafka-2.4.0/debian/gbp.conf diff --git a/lib/librdkafka-2.3.0/debian/librdkafka++1.install b/lib/librdkafka-2.4.0/debian/librdkafka++1.install similarity index 100% rename from lib/librdkafka-2.3.0/debian/librdkafka++1.install rename to lib/librdkafka-2.4.0/debian/librdkafka++1.install diff --git a/lib/librdkafka-2.3.0/debian/librdkafka-dev.examples b/lib/librdkafka-2.4.0/debian/librdkafka-dev.examples similarity index 100% rename from lib/librdkafka-2.3.0/debian/librdkafka-dev.examples rename to lib/librdkafka-2.4.0/debian/librdkafka-dev.examples diff --git a/lib/librdkafka-2.3.0/debian/librdkafka-dev.install b/lib/librdkafka-2.4.0/debian/librdkafka-dev.install similarity index 100% rename from lib/librdkafka-2.3.0/debian/librdkafka-dev.install rename to lib/librdkafka-2.4.0/debian/librdkafka-dev.install diff --git a/lib/librdkafka-2.3.0/debian/librdkafka1.docs b/lib/librdkafka-2.4.0/debian/librdkafka1.docs similarity index 100% rename from lib/librdkafka-2.3.0/debian/librdkafka1.docs rename to lib/librdkafka-2.4.0/debian/librdkafka1.docs diff --git a/lib/librdkafka-2.3.0/debian/librdkafka1.install b/lib/librdkafka-2.4.0/debian/librdkafka1.install similarity index 100% rename from lib/librdkafka-2.3.0/debian/librdkafka1.install rename to lib/librdkafka-2.4.0/debian/librdkafka1.install diff --git a/lib/librdkafka-2.3.0/debian/librdkafka1.symbols b/lib/librdkafka-2.4.0/debian/librdkafka1.symbols similarity index 100% rename from lib/librdkafka-2.3.0/debian/librdkafka1.symbols rename to lib/librdkafka-2.4.0/debian/librdkafka1.symbols diff --git a/lib/librdkafka-2.3.0/debian/rules b/lib/librdkafka-2.4.0/debian/rules similarity index 100% rename from lib/librdkafka-2.3.0/debian/rules rename to lib/librdkafka-2.4.0/debian/rules diff --git a/lib/librdkafka-2.3.0/debian/source/format b/lib/librdkafka-2.4.0/debian/source/format similarity index 100% rename from lib/librdkafka-2.3.0/debian/source/format rename to lib/librdkafka-2.4.0/debian/source/format diff --git a/lib/librdkafka-2.3.0/debian/watch b/lib/librdkafka-2.4.0/debian/watch similarity index 100% rename from lib/librdkafka-2.3.0/debian/watch rename to lib/librdkafka-2.4.0/debian/watch diff --git a/lib/librdkafka-2.3.0/dev-conf.sh b/lib/librdkafka-2.4.0/dev-conf.sh similarity index 100% rename from lib/librdkafka-2.3.0/dev-conf.sh rename to 
lib/librdkafka-2.4.0/dev-conf.sh
diff --git a/lib/librdkafka-2.3.0/examples/.gitignore b/lib/librdkafka-2.4.0/examples/.gitignore
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/.gitignore
rename to lib/librdkafka-2.4.0/examples/.gitignore
diff --git a/lib/librdkafka-2.3.0/examples/CMakeLists.txt b/lib/librdkafka-2.4.0/examples/CMakeLists.txt
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/CMakeLists.txt
rename to lib/librdkafka-2.4.0/examples/CMakeLists.txt
diff --git a/lib/librdkafka-2.3.0/examples/Makefile b/lib/librdkafka-2.4.0/examples/Makefile
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/Makefile
rename to lib/librdkafka-2.4.0/examples/Makefile
diff --git a/lib/librdkafka-2.3.0/examples/README.md b/lib/librdkafka-2.4.0/examples/README.md
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/README.md
rename to lib/librdkafka-2.4.0/examples/README.md
diff --git a/lib/librdkafka-2.3.0/examples/alter_consumer_group_offsets.c b/lib/librdkafka-2.4.0/examples/alter_consumer_group_offsets.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/alter_consumer_group_offsets.c
rename to lib/librdkafka-2.4.0/examples/alter_consumer_group_offsets.c
diff --git a/lib/librdkafka-2.3.0/examples/consumer.c b/lib/librdkafka-2.4.0/examples/consumer.c
similarity index 99%
rename from lib/librdkafka-2.3.0/examples/consumer.c
rename to lib/librdkafka-2.4.0/examples/consumer.c
index 8ce6f77f4da..dad3efc43b9 100644
--- a/lib/librdkafka-2.3.0/examples/consumer.c
+++ b/lib/librdkafka-2.4.0/examples/consumer.c
@@ -258,4 +258,4 @@ int main(int argc, char **argv) {
         rd_kafka_destroy(rk);

         return 0;
-}
+}
\ No newline at end of file
diff --git a/lib/librdkafka-2.3.0/examples/delete_records.c b/lib/librdkafka-2.4.0/examples/delete_records.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/delete_records.c
rename to lib/librdkafka-2.4.0/examples/delete_records.c
diff --git a/lib/librdkafka-2.3.0/examples/describe_cluster.c b/lib/librdkafka-2.4.0/examples/describe_cluster.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/describe_cluster.c
rename to lib/librdkafka-2.4.0/examples/describe_cluster.c
diff --git a/lib/librdkafka-2.3.0/examples/describe_consumer_groups.c b/lib/librdkafka-2.4.0/examples/describe_consumer_groups.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/describe_consumer_groups.c
rename to lib/librdkafka-2.4.0/examples/describe_consumer_groups.c
diff --git a/lib/librdkafka-2.3.0/examples/describe_topics.c b/lib/librdkafka-2.4.0/examples/describe_topics.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/describe_topics.c
rename to lib/librdkafka-2.4.0/examples/describe_topics.c
diff --git a/lib/librdkafka-2.3.0/examples/globals.json b/lib/librdkafka-2.4.0/examples/globals.json
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/globals.json
rename to lib/librdkafka-2.4.0/examples/globals.json
diff --git a/lib/librdkafka-2.3.0/examples/idempotent_producer.c b/lib/librdkafka-2.4.0/examples/idempotent_producer.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/idempotent_producer.c
rename to lib/librdkafka-2.4.0/examples/idempotent_producer.c
diff --git a/lib/librdkafka-2.3.0/examples/incremental_alter_configs.c b/lib/librdkafka-2.4.0/examples/incremental_alter_configs.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/incremental_alter_configs.c
rename to lib/librdkafka-2.4.0/examples/incremental_alter_configs.c
diff --git
a/lib/librdkafka-2.3.0/examples/kafkatest_verifiable_client.cpp b/lib/librdkafka-2.4.0/examples/kafkatest_verifiable_client.cpp similarity index 100% rename from lib/librdkafka-2.3.0/examples/kafkatest_verifiable_client.cpp rename to lib/librdkafka-2.4.0/examples/kafkatest_verifiable_client.cpp diff --git a/lib/librdkafka-2.3.0/examples/list_consumer_group_offsets.c b/lib/librdkafka-2.4.0/examples/list_consumer_group_offsets.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/list_consumer_group_offsets.c rename to lib/librdkafka-2.4.0/examples/list_consumer_group_offsets.c diff --git a/lib/librdkafka-2.3.0/examples/list_consumer_groups.c b/lib/librdkafka-2.4.0/examples/list_consumer_groups.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/list_consumer_groups.c rename to lib/librdkafka-2.4.0/examples/list_consumer_groups.c diff --git a/lib/librdkafka-2.3.0/examples/list_offsets.c b/lib/librdkafka-2.4.0/examples/list_offsets.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/list_offsets.c rename to lib/librdkafka-2.4.0/examples/list_offsets.c diff --git a/lib/librdkafka-2.3.0/examples/misc.c b/lib/librdkafka-2.4.0/examples/misc.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/misc.c rename to lib/librdkafka-2.4.0/examples/misc.c diff --git a/lib/librdkafka-2.3.0/examples/openssl_engine_example.cpp b/lib/librdkafka-2.4.0/examples/openssl_engine_example.cpp similarity index 100% rename from lib/librdkafka-2.3.0/examples/openssl_engine_example.cpp rename to lib/librdkafka-2.4.0/examples/openssl_engine_example.cpp diff --git a/lib/librdkafka-2.3.0/examples/producer.c b/lib/librdkafka-2.4.0/examples/producer.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/producer.c rename to lib/librdkafka-2.4.0/examples/producer.c diff --git a/lib/librdkafka-2.3.0/examples/producer.cpp b/lib/librdkafka-2.4.0/examples/producer.cpp similarity index 100% rename from lib/librdkafka-2.3.0/examples/producer.cpp rename to lib/librdkafka-2.4.0/examples/producer.cpp diff --git a/lib/librdkafka-2.3.0/examples/rdkafka_complex_consumer_example.c b/lib/librdkafka-2.4.0/examples/rdkafka_complex_consumer_example.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/rdkafka_complex_consumer_example.c rename to lib/librdkafka-2.4.0/examples/rdkafka_complex_consumer_example.c diff --git a/lib/librdkafka-2.3.0/examples/rdkafka_complex_consumer_example.cpp b/lib/librdkafka-2.4.0/examples/rdkafka_complex_consumer_example.cpp similarity index 100% rename from lib/librdkafka-2.3.0/examples/rdkafka_complex_consumer_example.cpp rename to lib/librdkafka-2.4.0/examples/rdkafka_complex_consumer_example.cpp diff --git a/lib/librdkafka-2.3.0/examples/rdkafka_consume_batch.cpp b/lib/librdkafka-2.4.0/examples/rdkafka_consume_batch.cpp similarity index 100% rename from lib/librdkafka-2.3.0/examples/rdkafka_consume_batch.cpp rename to lib/librdkafka-2.4.0/examples/rdkafka_consume_batch.cpp diff --git a/lib/librdkafka-2.3.0/examples/rdkafka_example.c b/lib/librdkafka-2.4.0/examples/rdkafka_example.c similarity index 100% rename from lib/librdkafka-2.3.0/examples/rdkafka_example.c rename to lib/librdkafka-2.4.0/examples/rdkafka_example.c diff --git a/lib/librdkafka-2.3.0/examples/rdkafka_example.cpp b/lib/librdkafka-2.4.0/examples/rdkafka_example.cpp similarity index 100% rename from lib/librdkafka-2.3.0/examples/rdkafka_example.cpp rename to lib/librdkafka-2.4.0/examples/rdkafka_example.cpp diff --git 
a/lib/librdkafka-2.3.0/examples/rdkafka_performance.c b/lib/librdkafka-2.4.0/examples/rdkafka_performance.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/rdkafka_performance.c
rename to lib/librdkafka-2.4.0/examples/rdkafka_performance.c
diff --git a/lib/librdkafka-2.3.0/examples/transactions-older-broker.c b/lib/librdkafka-2.4.0/examples/transactions-older-broker.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/transactions-older-broker.c
rename to lib/librdkafka-2.4.0/examples/transactions-older-broker.c
diff --git a/lib/librdkafka-2.3.0/examples/transactions.c b/lib/librdkafka-2.4.0/examples/transactions.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/transactions.c
rename to lib/librdkafka-2.4.0/examples/transactions.c
diff --git a/lib/librdkafka-2.3.0/examples/user_scram.c b/lib/librdkafka-2.4.0/examples/user_scram.c
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/user_scram.c
rename to lib/librdkafka-2.4.0/examples/user_scram.c
diff --git a/lib/librdkafka-2.3.0/examples/win_ssl_cert_store.cpp b/lib/librdkafka-2.4.0/examples/win_ssl_cert_store.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/examples/win_ssl_cert_store.cpp
rename to lib/librdkafka-2.4.0/examples/win_ssl_cert_store.cpp
diff --git a/lib/librdkafka-2.3.0/lds-gen.py b/lib/librdkafka-2.4.0/lds-gen.py
similarity index 100%
rename from lib/librdkafka-2.3.0/lds-gen.py
rename to lib/librdkafka-2.4.0/lds-gen.py
diff --git a/lib/librdkafka-2.3.0/mainpage.doxy b/lib/librdkafka-2.4.0/mainpage.doxy
similarity index 100%
rename from lib/librdkafka-2.3.0/mainpage.doxy
rename to lib/librdkafka-2.4.0/mainpage.doxy
diff --git a/lib/librdkafka-2.3.0/mklove/.gitignore b/lib/librdkafka-2.4.0/mklove/.gitignore
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/.gitignore
rename to lib/librdkafka-2.4.0/mklove/.gitignore
diff --git a/lib/librdkafka-2.3.0/mklove/Makefile.base b/lib/librdkafka-2.4.0/mklove/Makefile.base
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/Makefile.base
rename to lib/librdkafka-2.4.0/mklove/Makefile.base
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.atomics b/lib/librdkafka-2.4.0/mklove/modules/configure.atomics
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.atomics
rename to lib/librdkafka-2.4.0/mklove/modules/configure.atomics
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.base b/lib/librdkafka-2.4.0/mklove/modules/configure.base
similarity index 99%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.base
rename to lib/librdkafka-2.4.0/mklove/modules/configure.base
index 77cee61a684..c95ca944641 100644
--- a/lib/librdkafka-2.3.0/mklove/modules/configure.base
+++ b/lib/librdkafka-2.4.0/mklove/modules/configure.base
@@ -489,7 +489,7 @@ function mkl_dep_install_source {
         # Build and install
         mkl_dbg "Building $name from source in $sdir (func $func)"
-        $func $name "$ddir" >$ilog 2>&1
+        libdir="/usr/lib" $func $name "$ddir" >$ilog 2>&1
         retcode=$?

         mkl_popd  # $sdir
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.builtin b/lib/librdkafka-2.4.0/mklove/modules/configure.builtin
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.builtin
rename to lib/librdkafka-2.4.0/mklove/modules/configure.builtin
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.cc b/lib/librdkafka-2.4.0/mklove/modules/configure.cc
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.cc
rename to lib/librdkafka-2.4.0/mklove/modules/configure.cc
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.cxx b/lib/librdkafka-2.4.0/mklove/modules/configure.cxx
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.cxx
rename to lib/librdkafka-2.4.0/mklove/modules/configure.cxx
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.fileversion b/lib/librdkafka-2.4.0/mklove/modules/configure.fileversion
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.fileversion
rename to lib/librdkafka-2.4.0/mklove/modules/configure.fileversion
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.gitversion b/lib/librdkafka-2.4.0/mklove/modules/configure.gitversion
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.gitversion
rename to lib/librdkafka-2.4.0/mklove/modules/configure.gitversion
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.good_cflags b/lib/librdkafka-2.4.0/mklove/modules/configure.good_cflags
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.good_cflags
rename to lib/librdkafka-2.4.0/mklove/modules/configure.good_cflags
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.host b/lib/librdkafka-2.4.0/mklove/modules/configure.host
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.host
rename to lib/librdkafka-2.4.0/mklove/modules/configure.host
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.lib b/lib/librdkafka-2.4.0/mklove/modules/configure.lib
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.lib
rename to lib/librdkafka-2.4.0/mklove/modules/configure.lib
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.libcurl b/lib/librdkafka-2.4.0/mklove/modules/configure.libcurl
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.libcurl
rename to lib/librdkafka-2.4.0/mklove/modules/configure.libcurl
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.libsasl2 b/lib/librdkafka-2.4.0/mklove/modules/configure.libsasl2
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.libsasl2
rename to lib/librdkafka-2.4.0/mklove/modules/configure.libsasl2
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.libssl b/lib/librdkafka-2.4.0/mklove/modules/configure.libssl
similarity index 97%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.libssl
rename to lib/librdkafka-2.4.0/mklove/modules/configure.libssl
index a0aed5e77c0..8cf87045bc6 100644
--- a/lib/librdkafka-2.3.0/mklove/modules/configure.libssl
+++ b/lib/librdkafka-2.4.0/mklove/modules/configure.libssl
@@ -91,8 +91,8 @@ function manual_checks {
 function libcrypto_install_source {
     local name=$1
     local destdir=$2
-    local ver=3.0.11
-    local checksum="b3425d3bb4a2218d0697eb41f7fc0cdede016ed19ca49d168b78e8d947887f55"
+    local ver=3.0.12
+    local checksum="f93c9e8edde5e9166119de31755fc87b4aa34863662f67ddfcba14d0b6b69b61"
     local url=https://www.openssl.org/source/openssl-${ver}.tar.gz

     local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib"
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.libzstd b/lib/librdkafka-2.4.0/mklove/modules/configure.libzstd
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.libzstd
rename to lib/librdkafka-2.4.0/mklove/modules/configure.libzstd
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.parseversion b/lib/librdkafka-2.4.0/mklove/modules/configure.parseversion
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.parseversion
rename to lib/librdkafka-2.4.0/mklove/modules/configure.parseversion
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.pic b/lib/librdkafka-2.4.0/mklove/modules/configure.pic
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.pic
rename to lib/librdkafka-2.4.0/mklove/modules/configure.pic
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.socket b/lib/librdkafka-2.4.0/mklove/modules/configure.socket
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.socket
rename to lib/librdkafka-2.4.0/mklove/modules/configure.socket
diff --git a/lib/librdkafka-2.3.0/mklove/modules/configure.zlib b/lib/librdkafka-2.4.0/mklove/modules/configure.zlib
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/configure.zlib
rename to lib/librdkafka-2.4.0/mklove/modules/configure.zlib
diff --git a/lib/librdkafka-2.3.0/mklove/modules/patches/README.md b/lib/librdkafka-2.4.0/mklove/modules/patches/README.md
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/patches/README.md
rename to lib/librdkafka-2.4.0/mklove/modules/patches/README.md
diff --git a/lib/librdkafka-2.3.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch b/lib/librdkafka-2.4.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch
rename to lib/librdkafka-2.4.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch
diff --git a/lib/librdkafka-2.3.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch b/lib/librdkafka-2.4.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch
similarity index 100%
rename from lib/librdkafka-2.3.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch
rename to lib/librdkafka-2.4.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch
diff --git a/lib/librdkafka-2.3.0/packaging/RELEASE.md b/lib/librdkafka-2.4.0/packaging/RELEASE.md
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/RELEASE.md
rename to lib/librdkafka-2.4.0/packaging/RELEASE.md
diff --git a/lib/librdkafka-2.3.0/packaging/alpine/build-alpine.sh b/lib/librdkafka-2.4.0/packaging/alpine/build-alpine.sh
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/alpine/build-alpine.sh
rename to lib/librdkafka-2.4.0/packaging/alpine/build-alpine.sh
diff --git a/lib/librdkafka-2.3.0/packaging/archlinux/PKGBUILD b/lib/librdkafka-2.4.0/packaging/archlinux/PKGBUILD
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/archlinux/PKGBUILD
rename to lib/librdkafka-2.4.0/packaging/archlinux/PKGBUILD
diff --git a/lib/librdkafka-2.3.0/packaging/cmake/Config.cmake.in b/lib/librdkafka-2.4.0/packaging/cmake/Config.cmake.in
similarity index 100%
rename from
lib/librdkafka-2.3.0/packaging/cmake/Config.cmake.in rename to lib/librdkafka-2.4.0/packaging/cmake/Config.cmake.in diff --git a/lib/librdkafka-2.3.0/packaging/cmake/Modules/FindLZ4.cmake b/lib/librdkafka-2.4.0/packaging/cmake/Modules/FindLZ4.cmake similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/Modules/FindLZ4.cmake rename to lib/librdkafka-2.4.0/packaging/cmake/Modules/FindLZ4.cmake diff --git a/lib/librdkafka-2.3.0/packaging/cmake/Modules/FindZSTD.cmake b/lib/librdkafka-2.4.0/packaging/cmake/Modules/FindZSTD.cmake similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/Modules/FindZSTD.cmake rename to lib/librdkafka-2.4.0/packaging/cmake/Modules/FindZSTD.cmake diff --git a/lib/librdkafka-2.3.0/packaging/cmake/Modules/LICENSE.FindZstd b/lib/librdkafka-2.4.0/packaging/cmake/Modules/LICENSE.FindZstd similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/Modules/LICENSE.FindZstd rename to lib/librdkafka-2.4.0/packaging/cmake/Modules/LICENSE.FindZstd diff --git a/lib/librdkafka-2.3.0/packaging/cmake/README.md b/lib/librdkafka-2.4.0/packaging/cmake/README.md similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/README.md rename to lib/librdkafka-2.4.0/packaging/cmake/README.md diff --git a/lib/librdkafka-2.3.0/packaging/cmake/config.h.in b/lib/librdkafka-2.4.0/packaging/cmake/config.h.in similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/config.h.in rename to lib/librdkafka-2.4.0/packaging/cmake/config.h.in diff --git a/lib/librdkafka-2.3.0/packaging/cmake/parseversion.cmake b/lib/librdkafka-2.4.0/packaging/cmake/parseversion.cmake similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/parseversion.cmake rename to lib/librdkafka-2.4.0/packaging/cmake/parseversion.cmake diff --git a/lib/librdkafka-2.3.0/packaging/cmake/rdkafka.pc.in b/lib/librdkafka-2.4.0/packaging/cmake/rdkafka.pc.in similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/rdkafka.pc.in rename to lib/librdkafka-2.4.0/packaging/cmake/rdkafka.pc.in diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/atomic_32_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/atomic_32_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/atomic_32_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/atomic_32_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/atomic_64_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/atomic_64_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/atomic_64_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/atomic_64_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/c11threads_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/c11threads_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/c11threads_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/c11threads_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/crc32c_hw_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/crc32c_hw_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/crc32c_hw_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/crc32c_hw_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/dlopen_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/dlopen_test.c similarity index 100% rename from 
lib/librdkafka-2.3.0/packaging/cmake/try_compile/dlopen_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/dlopen_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/libsasl2_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/libsasl2_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/libsasl2_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/libsasl2_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/rand_r_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/rand_r_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/rand_r_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/rand_r_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/rdkafka_setup.cmake b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/rdkafka_setup.cmake similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/rdkafka_setup.cmake rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/rdkafka_setup.cmake diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/regex_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/regex_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/regex_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/regex_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/strndup_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/strndup_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/strndup_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/strndup_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/sync_32_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/sync_32_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/sync_32_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/sync_32_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cmake/try_compile/sync_64_test.c b/lib/librdkafka-2.4.0/packaging/cmake/try_compile/sync_64_test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cmake/try_compile/sync_64_test.c rename to lib/librdkafka-2.4.0/packaging/cmake/try_compile/sync_64_test.c diff --git a/lib/librdkafka-2.3.0/packaging/cp/README.md 
b/lib/librdkafka-2.4.0/packaging/cp/README.md similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cp/README.md rename to lib/librdkafka-2.4.0/packaging/cp/README.md diff --git a/lib/librdkafka-2.3.0/packaging/cp/check_features.c b/lib/librdkafka-2.4.0/packaging/cp/check_features.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cp/check_features.c rename to lib/librdkafka-2.4.0/packaging/cp/check_features.c diff --git a/lib/librdkafka-2.3.0/packaging/cp/verify-deb.sh b/lib/librdkafka-2.4.0/packaging/cp/verify-deb.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cp/verify-deb.sh rename to lib/librdkafka-2.4.0/packaging/cp/verify-deb.sh diff --git a/lib/librdkafka-2.3.0/packaging/cp/verify-packages.sh b/lib/librdkafka-2.4.0/packaging/cp/verify-packages.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cp/verify-packages.sh rename to lib/librdkafka-2.4.0/packaging/cp/verify-packages.sh diff --git a/lib/librdkafka-2.3.0/packaging/cp/verify-rpm.sh b/lib/librdkafka-2.4.0/packaging/cp/verify-rpm.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/cp/verify-rpm.sh rename to lib/librdkafka-2.4.0/packaging/cp/verify-rpm.sh diff --git a/lib/librdkafka-2.3.0/packaging/debian/.gitignore b/lib/librdkafka-2.4.0/packaging/debian/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/.gitignore rename to lib/librdkafka-2.4.0/packaging/debian/.gitignore diff --git a/lib/librdkafka-2.3.0/packaging/debian/changelog b/lib/librdkafka-2.4.0/packaging/debian/changelog similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/changelog rename to lib/librdkafka-2.4.0/packaging/debian/changelog diff --git a/lib/librdkafka-2.3.0/packaging/debian/compat b/lib/librdkafka-2.4.0/packaging/debian/compat similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/compat rename to lib/librdkafka-2.4.0/packaging/debian/compat diff --git a/lib/librdkafka-2.3.0/packaging/debian/control b/lib/librdkafka-2.4.0/packaging/debian/control similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/control rename to lib/librdkafka-2.4.0/packaging/debian/control diff --git a/lib/librdkafka-2.3.0/packaging/debian/copyright b/lib/librdkafka-2.4.0/packaging/debian/copyright similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/copyright rename to lib/librdkafka-2.4.0/packaging/debian/copyright diff --git a/lib/librdkafka-2.3.0/packaging/debian/docs b/lib/librdkafka-2.4.0/packaging/debian/docs similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/docs rename to lib/librdkafka-2.4.0/packaging/debian/docs diff --git a/lib/librdkafka-2.3.0/packaging/debian/gbp.conf b/lib/librdkafka-2.4.0/packaging/debian/gbp.conf similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/gbp.conf rename to lib/librdkafka-2.4.0/packaging/debian/gbp.conf diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.dirs b/lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.dirs similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.dirs rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.dirs diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.examples b/lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.examples similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.examples rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.examples diff --git 
a/lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.install b/lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.install similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.install rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.install diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.substvars b/lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.substvars similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka-dev.substvars rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka-dev.substvars diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka.dsc b/lib/librdkafka-2.4.0/packaging/debian/librdkafka.dsc similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka.dsc rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka.dsc diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka1-dbg.substvars b/lib/librdkafka-2.4.0/packaging/debian/librdkafka1-dbg.substvars similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka1-dbg.substvars rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka1-dbg.substvars diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka1.dirs b/lib/librdkafka-2.4.0/packaging/debian/librdkafka1.dirs similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka1.dirs rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka1.dirs diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka1.install b/lib/librdkafka-2.4.0/packaging/debian/librdkafka1.install similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka1.install rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka1.install diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka1.postinst.debhelper b/lib/librdkafka-2.4.0/packaging/debian/librdkafka1.postinst.debhelper similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka1.postinst.debhelper rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka1.postinst.debhelper diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka1.postrm.debhelper b/lib/librdkafka-2.4.0/packaging/debian/librdkafka1.postrm.debhelper similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka1.postrm.debhelper rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka1.postrm.debhelper diff --git a/lib/librdkafka-2.3.0/packaging/debian/librdkafka1.symbols b/lib/librdkafka-2.4.0/packaging/debian/librdkafka1.symbols similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/librdkafka1.symbols rename to lib/librdkafka-2.4.0/packaging/debian/librdkafka1.symbols diff --git a/lib/librdkafka-2.3.0/packaging/debian/rules b/lib/librdkafka-2.4.0/packaging/debian/rules similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/rules rename to lib/librdkafka-2.4.0/packaging/debian/rules diff --git a/lib/librdkafka-2.3.0/packaging/debian/source/format b/lib/librdkafka-2.4.0/packaging/debian/source/format similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/source/format rename to lib/librdkafka-2.4.0/packaging/debian/source/format diff --git a/lib/librdkafka-2.3.0/packaging/debian/watch b/lib/librdkafka-2.4.0/packaging/debian/watch similarity index 100% rename from lib/librdkafka-2.3.0/packaging/debian/watch rename to lib/librdkafka-2.4.0/packaging/debian/watch diff --git a/lib/librdkafka-2.3.0/packaging/get_version.py 
b/lib/librdkafka-2.4.0/packaging/get_version.py similarity index 100% rename from lib/librdkafka-2.3.0/packaging/get_version.py rename to lib/librdkafka-2.4.0/packaging/get_version.py diff --git a/lib/librdkafka-2.3.0/packaging/homebrew/README.md b/lib/librdkafka-2.4.0/packaging/homebrew/README.md similarity index 100% rename from lib/librdkafka-2.3.0/packaging/homebrew/README.md rename to lib/librdkafka-2.4.0/packaging/homebrew/README.md diff --git a/lib/librdkafka-2.3.0/packaging/homebrew/brew-update-pr.sh b/lib/librdkafka-2.4.0/packaging/homebrew/brew-update-pr.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/homebrew/brew-update-pr.sh rename to lib/librdkafka-2.4.0/packaging/homebrew/brew-update-pr.sh diff --git a/lib/librdkafka-2.3.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh b/lib/librdkafka-2.4.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh rename to lib/librdkafka-2.4.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh diff --git a/lib/librdkafka-2.3.0/packaging/mingw-w64/configure-build-msys2-mingw.sh b/lib/librdkafka-2.4.0/packaging/mingw-w64/configure-build-msys2-mingw.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/mingw-w64/configure-build-msys2-mingw.sh rename to lib/librdkafka-2.4.0/packaging/mingw-w64/configure-build-msys2-mingw.sh diff --git a/lib/librdkafka-2.3.0/packaging/mingw-w64/run-tests.sh b/lib/librdkafka-2.4.0/packaging/mingw-w64/run-tests.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/mingw-w64/run-tests.sh rename to lib/librdkafka-2.4.0/packaging/mingw-w64/run-tests.sh diff --git a/lib/librdkafka-2.3.0/packaging/mingw-w64/semaphoreci-build.sh b/lib/librdkafka-2.4.0/packaging/mingw-w64/semaphoreci-build.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/mingw-w64/semaphoreci-build.sh rename to lib/librdkafka-2.4.0/packaging/mingw-w64/semaphoreci-build.sh diff --git a/lib/librdkafka-2.3.0/packaging/mingw-w64/travis-before-install.sh b/lib/librdkafka-2.4.0/packaging/mingw-w64/travis-before-install.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/mingw-w64/travis-before-install.sh rename to lib/librdkafka-2.4.0/packaging/mingw-w64/travis-before-install.sh diff --git a/lib/librdkafka-2.3.0/packaging/nuget/.gitignore b/lib/librdkafka-2.4.0/packaging/nuget/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/.gitignore rename to lib/librdkafka-2.4.0/packaging/nuget/.gitignore diff --git a/lib/librdkafka-2.3.0/packaging/nuget/README.md b/lib/librdkafka-2.4.0/packaging/nuget/README.md similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/README.md rename to lib/librdkafka-2.4.0/packaging/nuget/README.md diff --git a/lib/librdkafka-2.3.0/packaging/nuget/artifact.py b/lib/librdkafka-2.4.0/packaging/nuget/artifact.py similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/artifact.py rename to lib/librdkafka-2.4.0/packaging/nuget/artifact.py diff --git a/lib/librdkafka-2.3.0/packaging/nuget/cleanup-s3.py b/lib/librdkafka-2.4.0/packaging/nuget/cleanup-s3.py similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/cleanup-s3.py rename to lib/librdkafka-2.4.0/packaging/nuget/cleanup-s3.py diff --git a/lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip 
b/lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
rename to lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip b/lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip
rename to lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip b/lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
rename to lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip b/lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip
rename to lib/librdkafka-2.4.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/nuget.sh b/lib/librdkafka-2.4.0/packaging/nuget/nuget.sh
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/nuget.sh
rename to lib/librdkafka-2.4.0/packaging/nuget/nuget.sh
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/nugetpackage.py b/lib/librdkafka-2.4.0/packaging/nuget/nugetpackage.py
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/nugetpackage.py
rename to lib/librdkafka-2.4.0/packaging/nuget/nugetpackage.py
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/packaging.py b/lib/librdkafka-2.4.0/packaging/nuget/packaging.py
similarity index 99%
rename from lib/librdkafka-2.3.0/packaging/nuget/packaging.py
rename to lib/librdkafka-2.4.0/packaging/nuget/packaging.py
index c4dab806d64..c0201980293 100755
--- a/lib/librdkafka-2.3.0/packaging/nuget/packaging.py
+++ b/lib/librdkafka-2.4.0/packaging/nuget/packaging.py
@@ -380,7 +380,7 @@ def apply_mappings(self):
                     attr = attr[1:]

                 if attr in a.info and \
-                        a.info[attr] != m.attributes[origattr]:
+                        a.info[attr] == m.attributes[origattr]:
                     found = False
                     break
             else:
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/push-to-nuget.sh b/lib/librdkafka-2.4.0/packaging/nuget/push-to-nuget.sh
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/push-to-nuget.sh
rename to lib/librdkafka-2.4.0/packaging/nuget/push-to-nuget.sh
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/release.py b/lib/librdkafka-2.4.0/packaging/nuget/release.py
similarity index 100%
rename from lib/librdkafka-2.3.0/packaging/nuget/release.py
rename to lib/librdkafka-2.4.0/packaging/nuget/release.py
diff --git a/lib/librdkafka-2.3.0/packaging/nuget/requirements.txt
b/lib/librdkafka-2.4.0/packaging/nuget/requirements.txt similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/requirements.txt rename to lib/librdkafka-2.4.0/packaging/nuget/requirements.txt diff --git a/lib/librdkafka-2.3.0/packaging/nuget/staticpackage.py b/lib/librdkafka-2.4.0/packaging/nuget/staticpackage.py similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/staticpackage.py rename to lib/librdkafka-2.4.0/packaging/nuget/staticpackage.py diff --git a/lib/librdkafka-2.3.0/packaging/nuget/templates/librdkafka.redist.nuspec b/lib/librdkafka-2.4.0/packaging/nuget/templates/librdkafka.redist.nuspec similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/templates/librdkafka.redist.nuspec rename to lib/librdkafka-2.4.0/packaging/nuget/templates/librdkafka.redist.nuspec diff --git a/lib/librdkafka-2.3.0/packaging/nuget/templates/librdkafka.redist.props b/lib/librdkafka-2.4.0/packaging/nuget/templates/librdkafka.redist.props similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/templates/librdkafka.redist.props rename to lib/librdkafka-2.4.0/packaging/nuget/templates/librdkafka.redist.props diff --git a/lib/librdkafka-2.3.0/packaging/nuget/templates/librdkafka.redist.targets b/lib/librdkafka-2.4.0/packaging/nuget/templates/librdkafka.redist.targets similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/templates/librdkafka.redist.targets rename to lib/librdkafka-2.4.0/packaging/nuget/templates/librdkafka.redist.targets diff --git a/lib/librdkafka-2.3.0/packaging/nuget/zfile/__init__.py b/lib/librdkafka-2.4.0/packaging/nuget/zfile/__init__.py similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/zfile/__init__.py rename to lib/librdkafka-2.4.0/packaging/nuget/zfile/__init__.py diff --git a/lib/librdkafka-2.3.0/packaging/nuget/zfile/zfile.py b/lib/librdkafka-2.4.0/packaging/nuget/zfile/zfile.py similarity index 100% rename from lib/librdkafka-2.3.0/packaging/nuget/zfile/zfile.py rename to lib/librdkafka-2.4.0/packaging/nuget/zfile/zfile.py diff --git a/lib/librdkafka-2.3.0/packaging/rpm/.gitignore b/lib/librdkafka-2.4.0/packaging/rpm/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/.gitignore rename to lib/librdkafka-2.4.0/packaging/rpm/.gitignore diff --git a/lib/librdkafka-2.3.0/packaging/rpm/Makefile b/lib/librdkafka-2.4.0/packaging/rpm/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/Makefile rename to lib/librdkafka-2.4.0/packaging/rpm/Makefile diff --git a/lib/librdkafka-2.3.0/packaging/rpm/README.md b/lib/librdkafka-2.4.0/packaging/rpm/README.md similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/README.md rename to lib/librdkafka-2.4.0/packaging/rpm/README.md diff --git a/lib/librdkafka-2.3.0/packaging/rpm/el7-x86_64.cfg b/lib/librdkafka-2.4.0/packaging/rpm/el7-x86_64.cfg similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/el7-x86_64.cfg rename to lib/librdkafka-2.4.0/packaging/rpm/el7-x86_64.cfg diff --git a/lib/librdkafka-2.3.0/packaging/rpm/librdkafka.spec b/lib/librdkafka-2.4.0/packaging/rpm/librdkafka.spec similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/librdkafka.spec rename to lib/librdkafka-2.4.0/packaging/rpm/librdkafka.spec diff --git a/lib/librdkafka-2.3.0/packaging/rpm/mock-on-docker.sh b/lib/librdkafka-2.4.0/packaging/rpm/mock-on-docker.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/mock-on-docker.sh rename to 
lib/librdkafka-2.4.0/packaging/rpm/mock-on-docker.sh diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/.gitignore b/lib/librdkafka-2.4.0/packaging/rpm/tests/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/.gitignore rename to lib/librdkafka-2.4.0/packaging/rpm/tests/.gitignore diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/Makefile b/lib/librdkafka-2.4.0/packaging/rpm/tests/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/Makefile rename to lib/librdkafka-2.4.0/packaging/rpm/tests/Makefile diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/README.md b/lib/librdkafka-2.4.0/packaging/rpm/tests/README.md similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/README.md rename to lib/librdkafka-2.4.0/packaging/rpm/tests/README.md diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/run-test.sh b/lib/librdkafka-2.4.0/packaging/rpm/tests/run-test.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/run-test.sh rename to lib/librdkafka-2.4.0/packaging/rpm/tests/run-test.sh diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/test-on-docker.sh b/lib/librdkafka-2.4.0/packaging/rpm/tests/test-on-docker.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/test-on-docker.sh rename to lib/librdkafka-2.4.0/packaging/rpm/tests/test-on-docker.sh diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/test.c b/lib/librdkafka-2.4.0/packaging/rpm/tests/test.c similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/test.c rename to lib/librdkafka-2.4.0/packaging/rpm/tests/test.c diff --git a/lib/librdkafka-2.3.0/packaging/rpm/tests/test.cpp b/lib/librdkafka-2.4.0/packaging/rpm/tests/test.cpp similarity index 100% rename from lib/librdkafka-2.3.0/packaging/rpm/tests/test.cpp rename to lib/librdkafka-2.4.0/packaging/rpm/tests/test.cpp diff --git a/lib/librdkafka-2.3.0/packaging/tools/build-deb-package.sh b/lib/librdkafka-2.4.0/packaging/tools/build-deb-package.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/build-deb-package.sh rename to lib/librdkafka-2.4.0/packaging/tools/build-deb-package.sh diff --git a/lib/librdkafka-2.3.0/packaging/tools/build-debian.sh b/lib/librdkafka-2.4.0/packaging/tools/build-debian.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/build-debian.sh rename to lib/librdkafka-2.4.0/packaging/tools/build-debian.sh diff --git a/lib/librdkafka-2.3.0/packaging/tools/build-manylinux.sh b/lib/librdkafka-2.4.0/packaging/tools/build-manylinux.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/build-manylinux.sh rename to lib/librdkafka-2.4.0/packaging/tools/build-manylinux.sh diff --git a/lib/librdkafka-2.3.0/packaging/tools/build-release-artifacts.sh b/lib/librdkafka-2.4.0/packaging/tools/build-release-artifacts.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/build-release-artifacts.sh rename to lib/librdkafka-2.4.0/packaging/tools/build-release-artifacts.sh diff --git a/lib/librdkafka-2.3.0/packaging/tools/distro-build.sh b/lib/librdkafka-2.4.0/packaging/tools/distro-build.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/distro-build.sh rename to lib/librdkafka-2.4.0/packaging/tools/distro-build.sh diff --git a/lib/librdkafka-2.3.0/packaging/tools/gh-release-checksums.py b/lib/librdkafka-2.4.0/packaging/tools/gh-release-checksums.py similarity index 100% rename from 
lib/librdkafka-2.3.0/packaging/tools/gh-release-checksums.py rename to lib/librdkafka-2.4.0/packaging/tools/gh-release-checksums.py diff --git a/lib/librdkafka-2.3.0/packaging/tools/rdutcoverage.sh b/lib/librdkafka-2.4.0/packaging/tools/rdutcoverage.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/rdutcoverage.sh rename to lib/librdkafka-2.4.0/packaging/tools/rdutcoverage.sh diff --git a/lib/librdkafka-2.3.0/packaging/tools/requirements.txt b/lib/librdkafka-2.4.0/packaging/tools/requirements.txt similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/requirements.txt rename to lib/librdkafka-2.4.0/packaging/tools/requirements.txt diff --git a/lib/librdkafka-2.3.0/packaging/tools/style-format.sh b/lib/librdkafka-2.4.0/packaging/tools/style-format.sh similarity index 100% rename from lib/librdkafka-2.3.0/packaging/tools/style-format.sh rename to lib/librdkafka-2.4.0/packaging/tools/style-format.sh diff --git a/lib/librdkafka-2.3.0/service.yml b/lib/librdkafka-2.4.0/service.yml similarity index 100% rename from lib/librdkafka-2.3.0/service.yml rename to lib/librdkafka-2.4.0/service.yml diff --git a/lib/librdkafka-2.3.0/src-cpp/CMakeLists.txt b/lib/librdkafka-2.4.0/src-cpp/CMakeLists.txt similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/CMakeLists.txt rename to lib/librdkafka-2.4.0/src-cpp/CMakeLists.txt diff --git a/lib/librdkafka-2.3.0/src-cpp/ConfImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/ConfImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/ConfImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/ConfImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/ConsumerImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/ConsumerImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/ConsumerImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/ConsumerImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/HandleImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/HandleImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/HandleImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/HandleImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/HeadersImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/HeadersImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/HeadersImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/HeadersImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/KafkaConsumerImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/KafkaConsumerImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/KafkaConsumerImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/KafkaConsumerImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/Makefile b/lib/librdkafka-2.4.0/src-cpp/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/Makefile rename to lib/librdkafka-2.4.0/src-cpp/Makefile diff --git a/lib/librdkafka-2.3.0/src-cpp/MessageImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/MessageImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/MessageImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/MessageImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/MetadataImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/MetadataImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/MetadataImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/MetadataImpl.cpp diff --git a/lib/librdkafka-2.3.0/src-cpp/ProducerImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/ProducerImpl.cpp similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/ProducerImpl.cpp rename to lib/librdkafka-2.4.0/src-cpp/ProducerImpl.cpp diff --git 
a/lib/librdkafka-2.3.0/src-cpp/QueueImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/QueueImpl.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/src-cpp/QueueImpl.cpp
rename to lib/librdkafka-2.4.0/src-cpp/QueueImpl.cpp
diff --git a/lib/librdkafka-2.3.0/src-cpp/README.md b/lib/librdkafka-2.4.0/src-cpp/README.md
similarity index 100%
rename from lib/librdkafka-2.3.0/src-cpp/README.md
rename to lib/librdkafka-2.4.0/src-cpp/README.md
diff --git a/lib/librdkafka-2.3.0/src-cpp/RdKafka.cpp b/lib/librdkafka-2.4.0/src-cpp/RdKafka.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/src-cpp/RdKafka.cpp
rename to lib/librdkafka-2.4.0/src-cpp/RdKafka.cpp
diff --git a/lib/librdkafka-2.3.0/src-cpp/TopicImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/TopicImpl.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/src-cpp/TopicImpl.cpp
rename to lib/librdkafka-2.4.0/src-cpp/TopicImpl.cpp
diff --git a/lib/librdkafka-2.3.0/src-cpp/TopicPartitionImpl.cpp b/lib/librdkafka-2.4.0/src-cpp/TopicPartitionImpl.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/src-cpp/TopicPartitionImpl.cpp
rename to lib/librdkafka-2.4.0/src-cpp/TopicPartitionImpl.cpp
diff --git a/lib/librdkafka-2.3.0/src-cpp/rdkafkacpp.h b/lib/librdkafka-2.4.0/src-cpp/rdkafkacpp.h
similarity index 99%
rename from lib/librdkafka-2.3.0/src-cpp/rdkafkacpp.h
rename to lib/librdkafka-2.4.0/src-cpp/rdkafkacpp.h
index f353d064b47..82c37dc20ed 100644
--- a/lib/librdkafka-2.3.0/src-cpp/rdkafkacpp.h
+++ b/lib/librdkafka-2.4.0/src-cpp/rdkafkacpp.h
@@ -112,7 +112,7 @@ namespace RdKafka {
  * @remark This value should only be used during compile time,
  *         for runtime checks of version use RdKafka::version()
  */
-#define RD_KAFKA_VERSION 0x020300ff
+#define RD_KAFKA_VERSION 0x020400ff

 /**
  * @brief Returns the librdkafka version as integer.
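The rdkafkacpp.h hunk above bumps the compile-time version macro to 0x020400ff (0xMMmmrrpp layout, with 0xff marking a final release). As the surrounding doc comment notes, RD_KAFKA_VERSION only reflects the headers a program was compiled against; the library actually loaded at runtime is reported by RdKafka::version(). A minimal sketch of the distinction, illustrative only and not part of the upgrade patch:

    #include <cstdio>
    #include <librdkafka/rdkafkacpp.h>

    int main() {
    #if RD_KAFKA_VERSION >= 0x020400ff
            /* Compile-time check: the headers this binary was built
             * against are at least 2.4.0. */
            std::printf("compiled against librdkafka headers >= 2.4.0\n");
    #endif
            /* Runtime check: the shared library actually loaded, which can
             * differ from the header version after an in-place upgrade. */
            std::printf("headers 0x%08x, runtime 0x%08x (%s)\n",
                        (unsigned)RD_KAFKA_VERSION,
                        (unsigned)RdKafka::version(),
                        RdKafka::version_str().c_str());
            return 0;
    }

Logging both values at startup is a common pattern, since a redistributable build can pair newer headers with an older runtime library.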
diff --git a/lib/librdkafka-2.3.0/src-cpp/rdkafkacpp_int.h b/lib/librdkafka-2.4.0/src-cpp/rdkafkacpp_int.h similarity index 100% rename from lib/librdkafka-2.3.0/src-cpp/rdkafkacpp_int.h rename to lib/librdkafka-2.4.0/src-cpp/rdkafkacpp_int.h diff --git a/lib/librdkafka-2.3.0/src/CMakeLists.txt b/lib/librdkafka-2.4.0/src/CMakeLists.txt similarity index 100% rename from lib/librdkafka-2.3.0/src/CMakeLists.txt rename to lib/librdkafka-2.4.0/src/CMakeLists.txt diff --git a/lib/librdkafka-2.3.0/src/Makefile b/lib/librdkafka-2.4.0/src/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/src/Makefile rename to lib/librdkafka-2.4.0/src/Makefile diff --git a/lib/librdkafka-2.3.0/src/cJSON.c b/lib/librdkafka-2.4.0/src/cJSON.c similarity index 100% rename from lib/librdkafka-2.3.0/src/cJSON.c rename to lib/librdkafka-2.4.0/src/cJSON.c diff --git a/lib/librdkafka-2.3.0/src/cJSON.h b/lib/librdkafka-2.4.0/src/cJSON.h similarity index 100% rename from lib/librdkafka-2.3.0/src/cJSON.h rename to lib/librdkafka-2.4.0/src/cJSON.h diff --git a/lib/librdkafka-2.3.0/src/crc32c.c b/lib/librdkafka-2.4.0/src/crc32c.c similarity index 100% rename from lib/librdkafka-2.3.0/src/crc32c.c rename to lib/librdkafka-2.4.0/src/crc32c.c diff --git a/lib/librdkafka-2.3.0/src/crc32c.h b/lib/librdkafka-2.4.0/src/crc32c.h similarity index 100% rename from lib/librdkafka-2.3.0/src/crc32c.h rename to lib/librdkafka-2.4.0/src/crc32c.h diff --git a/lib/librdkafka-2.3.0/src/generate_proto.sh b/lib/librdkafka-2.4.0/src/generate_proto.sh similarity index 100% rename from lib/librdkafka-2.3.0/src/generate_proto.sh rename to lib/librdkafka-2.4.0/src/generate_proto.sh diff --git a/lib/librdkafka-2.3.0/src/librdkafka_cgrp_synch.png b/lib/librdkafka-2.4.0/src/librdkafka_cgrp_synch.png similarity index 100% rename from lib/librdkafka-2.3.0/src/librdkafka_cgrp_synch.png rename to lib/librdkafka-2.4.0/src/librdkafka_cgrp_synch.png diff --git a/lib/librdkafka-2.3.0/src/lz4.c b/lib/librdkafka-2.4.0/src/lz4.c similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4.c rename to lib/librdkafka-2.4.0/src/lz4.c diff --git a/lib/librdkafka-2.3.0/src/lz4.h b/lib/librdkafka-2.4.0/src/lz4.h similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4.h rename to lib/librdkafka-2.4.0/src/lz4.h diff --git a/lib/librdkafka-2.3.0/src/lz4frame.c b/lib/librdkafka-2.4.0/src/lz4frame.c similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4frame.c rename to lib/librdkafka-2.4.0/src/lz4frame.c diff --git a/lib/librdkafka-2.3.0/src/lz4frame.h b/lib/librdkafka-2.4.0/src/lz4frame.h similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4frame.h rename to lib/librdkafka-2.4.0/src/lz4frame.h diff --git a/lib/librdkafka-2.3.0/src/lz4frame_static.h b/lib/librdkafka-2.4.0/src/lz4frame_static.h similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4frame_static.h rename to lib/librdkafka-2.4.0/src/lz4frame_static.h diff --git a/lib/librdkafka-2.3.0/src/lz4hc.c b/lib/librdkafka-2.4.0/src/lz4hc.c similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4hc.c rename to lib/librdkafka-2.4.0/src/lz4hc.c diff --git a/lib/librdkafka-2.3.0/src/lz4hc.h b/lib/librdkafka-2.4.0/src/lz4hc.h similarity index 100% rename from lib/librdkafka-2.3.0/src/lz4hc.h rename to lib/librdkafka-2.4.0/src/lz4hc.h diff --git a/lib/librdkafka-2.3.0/src/queue.h b/lib/librdkafka-2.4.0/src/queue.h similarity index 100% rename from lib/librdkafka-2.3.0/src/queue.h rename to lib/librdkafka-2.4.0/src/queue.h diff --git a/lib/librdkafka-2.3.0/src/rd.h 
b/lib/librdkafka-2.4.0/src/rd.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rd.h rename to lib/librdkafka-2.4.0/src/rd.h diff --git a/lib/librdkafka-2.3.0/src/rdaddr.c b/lib/librdkafka-2.4.0/src/rdaddr.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdaddr.c rename to lib/librdkafka-2.4.0/src/rdaddr.c diff --git a/lib/librdkafka-2.3.0/src/rdaddr.h b/lib/librdkafka-2.4.0/src/rdaddr.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdaddr.h rename to lib/librdkafka-2.4.0/src/rdaddr.h diff --git a/lib/librdkafka-2.3.0/src/rdatomic.h b/lib/librdkafka-2.4.0/src/rdatomic.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdatomic.h rename to lib/librdkafka-2.4.0/src/rdatomic.h diff --git a/lib/librdkafka-2.3.0/src/rdavg.h b/lib/librdkafka-2.4.0/src/rdavg.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdavg.h rename to lib/librdkafka-2.4.0/src/rdavg.h diff --git a/lib/librdkafka-2.3.0/src/rdavl.c b/lib/librdkafka-2.4.0/src/rdavl.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdavl.c rename to lib/librdkafka-2.4.0/src/rdavl.c diff --git a/lib/librdkafka-2.3.0/src/rdavl.h b/lib/librdkafka-2.4.0/src/rdavl.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdavl.h rename to lib/librdkafka-2.4.0/src/rdavl.h diff --git a/lib/librdkafka-2.3.0/src/rdbase64.c b/lib/librdkafka-2.4.0/src/rdbase64.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdbase64.c rename to lib/librdkafka-2.4.0/src/rdbase64.c diff --git a/lib/librdkafka-2.3.0/src/rdbase64.h b/lib/librdkafka-2.4.0/src/rdbase64.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdbase64.h rename to lib/librdkafka-2.4.0/src/rdbase64.h diff --git a/lib/librdkafka-2.3.0/src/rdbuf.c b/lib/librdkafka-2.4.0/src/rdbuf.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdbuf.c rename to lib/librdkafka-2.4.0/src/rdbuf.c diff --git a/lib/librdkafka-2.3.0/src/rdbuf.h b/lib/librdkafka-2.4.0/src/rdbuf.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdbuf.h rename to lib/librdkafka-2.4.0/src/rdbuf.h diff --git a/lib/librdkafka-2.3.0/src/rdcrc32.c b/lib/librdkafka-2.4.0/src/rdcrc32.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdcrc32.c rename to lib/librdkafka-2.4.0/src/rdcrc32.c diff --git a/lib/librdkafka-2.3.0/src/rdcrc32.h b/lib/librdkafka-2.4.0/src/rdcrc32.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdcrc32.h rename to lib/librdkafka-2.4.0/src/rdcrc32.h diff --git a/lib/librdkafka-2.3.0/src/rddl.c b/lib/librdkafka-2.4.0/src/rddl.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rddl.c rename to lib/librdkafka-2.4.0/src/rddl.c diff --git a/lib/librdkafka-2.3.0/src/rddl.h b/lib/librdkafka-2.4.0/src/rddl.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rddl.h rename to lib/librdkafka-2.4.0/src/rddl.h diff --git a/lib/librdkafka-2.3.0/src/rdendian.h b/lib/librdkafka-2.4.0/src/rdendian.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdendian.h rename to lib/librdkafka-2.4.0/src/rdendian.h diff --git a/lib/librdkafka-2.3.0/src/rdfloat.h b/lib/librdkafka-2.4.0/src/rdfloat.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdfloat.h rename to lib/librdkafka-2.4.0/src/rdfloat.h diff --git a/lib/librdkafka-2.3.0/src/rdfnv1a.c b/lib/librdkafka-2.4.0/src/rdfnv1a.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdfnv1a.c rename to lib/librdkafka-2.4.0/src/rdfnv1a.c diff --git a/lib/librdkafka-2.3.0/src/rdfnv1a.h b/lib/librdkafka-2.4.0/src/rdfnv1a.h 
similarity index 100% rename from lib/librdkafka-2.3.0/src/rdfnv1a.h rename to lib/librdkafka-2.4.0/src/rdfnv1a.h diff --git a/lib/librdkafka-2.3.0/src/rdgz.c b/lib/librdkafka-2.4.0/src/rdgz.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdgz.c rename to lib/librdkafka-2.4.0/src/rdgz.c diff --git a/lib/librdkafka-2.3.0/src/rdgz.h b/lib/librdkafka-2.4.0/src/rdgz.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdgz.h rename to lib/librdkafka-2.4.0/src/rdgz.h diff --git a/lib/librdkafka-2.3.0/src/rdhdrhistogram.c b/lib/librdkafka-2.4.0/src/rdhdrhistogram.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdhdrhistogram.c rename to lib/librdkafka-2.4.0/src/rdhdrhistogram.c diff --git a/lib/librdkafka-2.3.0/src/rdhdrhistogram.h b/lib/librdkafka-2.4.0/src/rdhdrhistogram.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdhdrhistogram.h rename to lib/librdkafka-2.4.0/src/rdhdrhistogram.h diff --git a/lib/librdkafka-2.3.0/src/rdhttp.c b/lib/librdkafka-2.4.0/src/rdhttp.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdhttp.c rename to lib/librdkafka-2.4.0/src/rdhttp.c diff --git a/lib/librdkafka-2.3.0/src/rdhttp.h b/lib/librdkafka-2.4.0/src/rdhttp.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdhttp.h rename to lib/librdkafka-2.4.0/src/rdhttp.h diff --git a/lib/librdkafka-2.3.0/src/rdinterval.h b/lib/librdkafka-2.4.0/src/rdinterval.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdinterval.h rename to lib/librdkafka-2.4.0/src/rdinterval.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka.c b/lib/librdkafka-2.4.0/src/rdkafka.c similarity index 96% rename from lib/librdkafka-2.3.0/src/rdkafka.c rename to lib/librdkafka-2.4.0/src/rdkafka.c index 99d9c17449c..a23bad46931 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka.c +++ b/lib/librdkafka-2.4.0/src/rdkafka.c @@ -64,6 +64,7 @@ #endif #include "rdtime.h" +#include "rdmap.h" #include "crc32c.h" #include "rdunittest.h" @@ -492,6 +493,9 @@ static const struct rd_kafka_err_desc rd_kafka_err_descs[] = { "Local: No offset to automatically reset to"), _ERR_DESC(RD_KAFKA_RESP_ERR__LOG_TRUNCATION, "Local: Partition log truncation detected"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD, + "Local: an invalid record in the same batch caused " + "the failure of this message too."), _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, "Unknown broker error"), _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, "Success"), @@ -700,6 +704,17 @@ static const struct rd_kafka_err_desc rd_kafka_err_descs[] = { _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE, "Broker: Request principal deserialization failed during " "forwarding"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID, "Broker: Unknown topic id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH, + "Broker: The member epoch is fenced by the group coordinator"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID, + "Broker: The instance ID is still used by another member in the " + "consumer group"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR, + "Broker: The assignor or its version range is not supported by " + "the consumer group"), + _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH, + "Broker: The member epoch is stale"), _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)}; @@ -1601,6 +1616,7 @@ static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st, [RD_KAFKAP_BrokerHeartbeat] = rd_true, [RD_KAFKAP_UnregisterBroker] = rd_true, [RD_KAFKAP_AllocateProducerIds] = rd_true, + [RD_KAFKAP_ConsumerGroupHeartbeat] = rd_true, }, [3 
/*hide-unless-non-zero*/] = {
             /* Hide Admin requests unless they've been used */
@@ -2114,7 +2130,10 @@ static int rd_kafka_thread_main(void *arg) {
                             RD_KAFKA_CGRP_STATE_TERM)))) {
                 rd_ts_t sleeptime = rd_kafka_timers_next(
                     &rk->rk_timers, 1000 * 1000 /*1s*/, 1 /*lock*/);
-                rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0,
+                /* Use ceiling division to avoid calling serve with a 0 ms
+                 * timeout in a tight loop until 1 ms has passed. */
+                int timeout_ms = (sleeptime + 999) / 1000;
+                rd_kafka_q_serve(rk->rk_ops, timeout_ms, 0,
                                  RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
                 if (rk->rk_cgrp) /* FIXME: move to timer-triggered */
                         rd_kafka_cgrp_serve(rk->rk_cgrp);
@@ -2167,6 +2186,7 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
         rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
         int ret_errno = 0;
         const char *conf_err;
+        char *group_remote_assignor_override = NULL;
 #ifndef _WIN32
         sigset_t newset, oldset;
 #endif
@@ -2358,6 +2378,64 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
                 goto fail;
         }
 
+        if (!rk->rk_conf.group_remote_assignor) {
+                rd_kafka_assignor_t *cooperative_assignor;
+
+                /* Detect if chosen assignor is cooperative
+                 * FIXME: remove this compatibility altogether
+                 * and apply the breaking changes that will be required
+                 * in next major version. */
+
+                cooperative_assignor =
+                    rd_kafka_assignor_find(rk, "cooperative-sticky");
+                rk->rk_conf.partition_assignors_cooperative =
+                    !rk->rk_conf.partition_assignors.rl_cnt ||
+                    (cooperative_assignor &&
+                     cooperative_assignor->rkas_enabled);
+
+                if (rk->rk_conf.group_protocol ==
+                    RD_KAFKA_GROUP_PROTOCOL_CONSUMER) {
+                        /* Default remote assignor to the chosen local one. */
+                        if (rk->rk_conf.partition_assignors_cooperative) {
+                                group_remote_assignor_override =
+                                    rd_strdup("uniform");
+                                rk->rk_conf.group_remote_assignor =
+                                    group_remote_assignor_override;
+                        } else {
+                                rd_kafka_assignor_t *range_assignor =
+                                    rd_kafka_assignor_find(rk, "range");
+                                if (range_assignor &&
+                                    range_assignor->rkas_enabled) {
+                                        rd_kafka_log(
+                                            rk, LOG_WARNING, "ASSIGNOR",
+                                            "\"range\" assignor is sticky "
+                                            "with group protocol CONSUMER");
+                                        group_remote_assignor_override =
+                                            rd_strdup("range");
+                                        rk->rk_conf.group_remote_assignor =
+                                            group_remote_assignor_override;
+                                } else {
+                                        rd_kafka_log(
+                                            rk, LOG_WARNING, "ASSIGNOR",
+                                            "roundrobin assignor isn't "
+                                            "available "
+                                            "with group protocol CONSUMER, "
+                                            "using the \"uniform\" one. "
+                                            "It's similar, "
+                                            "but it's also sticky");
+                                        group_remote_assignor_override =
+                                            rd_strdup("uniform");
+                                        rk->rk_conf.group_remote_assignor =
+                                            group_remote_assignor_override;
+                                }
+                        }
+                }
+        } else {
+                /* When users start setting properties of the new protocol,
+                 * they can only use incremental_assign/unassign. */
+                rk->rk_conf.partition_assignors_cooperative = rd_true;
+        }
+
         /* Create Mock cluster */
         rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0);
         if (rk->rk_conf.mock.broker_cnt > 0) {
@@ -2436,8 +2514,9 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
 
         if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) {
                 /* Create consumer group handle */
-                rk->rk_cgrp = rd_kafka_cgrp_new(rk, rk->rk_group_id,
-                                                rk->rk_client_id);
+                rk->rk_cgrp = rd_kafka_cgrp_new(
+                    rk, rk->rk_conf.group_protocol, rk->rk_group_id,
+                    rk->rk_client_id);
                 rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_cgrp->rkcg_q);
 
         } else {
@@ -2626,6 +2705,8 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
          * that belong to rk_conf and thus needs to be cleaned up.
          * Legacy APIs, sigh..
          */
         if (app_conf) {
+                if (group_remote_assignor_override)
+                        rd_free(group_remote_assignor_override);
                 rd_kafka_assignors_term(rk);
                 rd_kafka_interceptors_destroy(&rk->rk_conf);
                 memset(&rk->rk_conf, 0, sizeof(rk->rk_conf));
@@ -5071,6 +5152,38 @@ rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid) {
         return copy_uuid;
 }
 
+/**
+ * Returns a new non cryptographically secure UUIDv4 (random).
+ *
+ * @return A UUIDv4.
+ *
+ * @remark Must be freed after use using rd_kafka_Uuid_destroy().
+ */
+rd_kafka_Uuid_t rd_kafka_Uuid_random() {
+        int i;
+        unsigned char rand_values_bytes[16] = {0};
+        uint64_t *rand_values_uint64 = (uint64_t *)rand_values_bytes;
+        unsigned char *rand_values_app;
+        rd_kafka_Uuid_t ret = RD_KAFKA_UUID_ZERO;
+        for (i = 0; i < 16; i += 2) {
+                uint16_t rand_uint16 = (uint16_t)rd_jitter(0, INT16_MAX - 1);
+                /* No need to convert endianness here because it's still only
+                 * a random value. */
+                rand_values_app = (unsigned char *)&rand_uint16;
+                rand_values_bytes[i] |= rand_values_app[0];
+                rand_values_bytes[i + 1] |= rand_values_app[1];
+        }
+
+        rand_values_bytes[6] &= 0x0f; /* clear version */
+        rand_values_bytes[6] |= 0x40; /* version 4 */
+        rand_values_bytes[8] &= 0x3f; /* clear variant */
+        rand_values_bytes[8] |= 0x80; /* IETF variant */
+
+        ret.most_significant_bits = be64toh(rand_values_uint64[0]);
+        ret.least_significant_bits = be64toh(rand_values_uint64[1]);
+        return ret;
+}
+
 /**
  * @brief Destroy the provided uuid.
  *
@@ -5080,6 +5193,40 @@ void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid) {
         rd_free(uuid);
 }
 
+/**
+ * @brief Computes canonical encoding for the given uuid string.
+ *        Mainly useful for testing.
+ *
+ * @param uuid UUID for which canonical encoding is required.
+ *
+ * @return canonical encoded string for the given UUID.
+ *
+ * @remark Must be freed after use.
+ */
+const char *rd_kafka_Uuid_str(const rd_kafka_Uuid_t *uuid) {
+        int i, j;
+        unsigned char bytes[16];
+        char *ret = rd_calloc(37, sizeof(*ret));
+
+        for (i = 0; i < 8; i++) {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+                j = 7 - i;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+                j = i;
+#endif
+                bytes[i] = (uuid->most_significant_bits >> (8 * j)) & 0xFF;
+                bytes[8 + i] = (uuid->least_significant_bits >> (8 * j)) & 0xFF;
+        }
+
+        rd_snprintf(ret, 37,
+                    "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%"
+                    "02x%02x%02x",
+                    bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
+                    bytes[6], bytes[7], bytes[8], bytes[9], bytes[10],
+                    bytes[11], bytes[12], bytes[13], bytes[14], bytes[15]);
+        return ret;
+}
+
 const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid) {
         if (*uuid->base64str)
                 return uuid->base64str;
@@ -5106,6 +5253,17 @@ const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid) {
         return uuid->base64str;
 }
 
+unsigned int rd_kafka_Uuid_hash(const rd_kafka_Uuid_t *uuid) {
+        unsigned char bytes[16];
+        memcpy(bytes, &uuid->most_significant_bits, 8);
+        memcpy(&bytes[8], &uuid->least_significant_bits, 8);
+        return rd_bytes_hash(bytes, 16);
+}
+
+unsigned int rd_kafka_Uuid_map_hash(const void *key) {
+        return rd_kafka_Uuid_hash(key);
+}
+
 int64_t rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid) {
         return uuid->least_significant_bits;
 }
@@ -5113,4 +5271,4 @@ int64_t rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid) {
 
 int64_t rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid) {
         return uuid->most_significant_bits;
-}
\ No newline at end of file
+}
diff --git a/lib/librdkafka-2.3.0/src/rdkafka.h b/lib/librdkafka-2.4.0/src/rdkafka.h
similarity index 99%
rename from lib/librdkafka-2.3.0/src/rdkafka.h
rename to lib/librdkafka-2.4.0/src/rdkafka.h
index de620284f0d..e403b895f67 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka.h
+++ b/lib/librdkafka-2.4.0/src/rdkafka.h
@@ -167,7 +167,7 @@ typedef SSIZE_T ssize_t;
  * @remark This value should only be used during compile time,
  *         for runtime checks of version use rd_kafka_version()
  */
-#define RD_KAFKA_VERSION 0x020300ff
+#define RD_KAFKA_VERSION 0x020400ff
 
 /**
  * @brief Returns the librdkafka version as integer.
@@ -407,6 +407,9 @@ typedef enum {
         RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140,
         /** Partition log truncation detected */
         RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139,
+        /** A different record in the batch was invalid
+         * and this message failed persisting.
+         */
+        RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD = -138,
 
         /** End internal error codes */
         RD_KAFKA_RESP_ERR__END = -100,
@@ -631,7 +634,18 @@ typedef enum {
         RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
         /** Request principal deserialization failed during forwarding */
         RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
-
+        /** Unknown Topic Id */
+        RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID = 100,
+        /** The member epoch is fenced by the group coordinator */
+        RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH = 110,
+        /** The instance ID is still used by another member in the
+         * consumer group */
+        RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID = 111,
+        /** The assignor or its version range is not supported by the consumer
+         * group */
+        RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR = 112,
+        /** The member epoch is stale */
+        RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH = 113,
         RD_KAFKA_RESP_ERR_END_ALL,
 } rd_kafka_resp_err_t;
@@ -1486,6 +1500,16 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
 RD_EXPORT
 const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
 
+/**
+ * @brief Returns the error string for an errored produced rd_kafka_message_t or
+ *        NULL if there was no error.
+ *
+ * @remark This function MUST be used with the producer.
+ */
+RD_EXPORT
+const char *
+rd_kafka_message_produce_errstr(const rd_kafka_message_t *rkmessage);
+
 
 /**
  * @brief Returns the message timestamp for a consumed message.
@@ -4402,6 +4426,21 @@ RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk);
  *          or successfully scheduled if asynchronous, or failed.
  *          RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
  *          a fatal error.
+ *
+ * FIXME: Update below documentation.
+ *
+ *          RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH is returned, when
+ *          using `group.protocol=consumer`, if the commit failed because the
+ *          member has switched to a new member epoch.
+ *          This error code can be retried.
+ *          Partition level error is also set in the \p offsets.
+ *
+ *          RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID is returned, when
+ *          using `group.protocol=consumer`, if the member has been
+ *          removed from the consumer group.
+ *          This error code is permanent, uncommitted messages will be
+ *          reprocessed by this or a different member and committed there.
+ *          Partition level error is also set in the \p offsets.
  */
 RD_EXPORT
 rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk,
@@ -4546,6 +4585,20 @@ rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id,
                                                 const char *group_instance_id);
 
 
+/**
+ * @brief Get member id of a group metadata.
+ *
+ * @param group_metadata The group metadata
+ *
+ * @returns The member id contained in the passed \p group_metadata.
+ *
+ * @remark The returned pointer has the same lifetime as \p group_metadata.
+ */
+RD_EXPORT
+const char *rd_kafka_consumer_group_metadata_member_id(
+    const rd_kafka_consumer_group_metadata_t *group_metadata);
+
+
 /**
  * @brief Frees the consumer group metadata object as returned by
  *        rd_kafka_consumer_group_metadata().
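
Reviewer note: the rdkafka.h hunk above introduces the KIP-848 broker error codes and documents the new rd_kafka_commit() semantics under `group.protocol=consumer`. A minimal sketch of how an application could act on those documented semantics — the helper below is not part of this patch, and `rk`/`offsets` are assumed to come from an existing consumer and a populated partition list:

#include <librdkafka/rdkafka.h>

/* Commit synchronously, honoring the KIP-848 error semantics
 * documented in the hunk above. */
static rd_kafka_resp_err_t
commit_kip848(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets) {
        rd_kafka_resp_err_t err = rd_kafka_commit(rk, offsets, 0 /*sync*/);

        if (err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) {
                /* Retriable: the member switched to a new member epoch;
                 * partition-level errors are also set in offsets. */
                err = rd_kafka_commit(rk, offsets, 0 /*sync*/);
        } else if (err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) {
                /* Permanent: the member was removed from the group;
                 * uncommitted messages will be reprocessed by this or
                 * another member after rejoining. */
        }
        return err;
}

Passing an explicit `offsets` list (rather than NULL, which commits the current assignment) lets the caller inspect the per-partition error fields afterwards.
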
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_admin.c b/lib/librdkafka-2.4.0/src/rdkafka_admin.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_admin.c rename to lib/librdkafka-2.4.0/src/rdkafka_admin.c index 4184d1cdc6e..924fb9506ce 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_admin.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_admin.c @@ -1507,7 +1507,7 @@ rd_kafka_admin_MetadataRequest(rd_kafka_broker_t *rkb, rd_kafka_replyq_t replyq, void *opaque) { return rd_kafka_MetadataRequest_resp_cb( - rkb, topics, reason, + rkb, topics, NULL, reason, rd_false /* No admin operation requires topic creation. */, include_cluster_authorized_operations, include_topic_authorized_operations, @@ -3938,7 +3938,8 @@ rd_kafka_DeleteRecordsResponse_parse(rd_kafka_op_t *rko_req, RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - offsets = rd_kafka_buf_read_topic_partitions(reply, 0, fields); + offsets = rd_kafka_buf_read_topic_partitions( + reply, rd_false /*don't use topic_id*/, rd_true, 0, fields); if (!offsets) rd_kafka_buf_parse_fail(reply, "Failed to parse topic partitions"); @@ -4924,7 +4925,8 @@ rd_kafka_OffsetDeleteResponse_parse(rd_kafka_op_t *rko_req, RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - partitions = rd_kafka_buf_read_topic_partitions(reply, 16, fields); + partitions = rd_kafka_buf_read_topic_partitions( + reply, rd_false /*don't use topic_id*/, rd_true, 16, fields); if (!partitions) { rd_snprintf(errstr, errstr_size, "Failed to parse OffsetDeleteResponse partitions"); @@ -6488,7 +6490,7 @@ rd_kafka_DeleteAclsResponse_parse(rd_kafka_op_t *rko_req, result_response = rd_kafka_DeleteAcls_result_response_new(error_code, errstr); - /* #maching_acls */ + /* #matching_acls */ rd_kafka_buf_read_arraycnt(reply, &matching_acls_cnt, 100000); for (j = 0; j < (int)matching_acls_cnt; j++) { int16_t acl_error_code; @@ -6959,8 +6961,8 @@ static rd_kafka_resp_err_t rd_kafka_ListConsumerGroupOffsetsRequest( require_stable_offsets = rd_kafka_confval_get_int(&options->require_stable_offsets); rd_kafka_OffsetFetchRequest( - rkb, grpoffsets->group_id, grpoffsets->partitions, - require_stable_offsets, op_timeout, replyq, resp_cb, opaque); + rkb, grpoffsets->group_id, grpoffsets->partitions, rd_false, -1, + NULL, require_stable_offsets, op_timeout, replyq, resp_cb, opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -8112,7 +8114,8 @@ rd_kafka_DescribeConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req, {RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; partitions = rd_kafka_buf_read_topic_partitions( - rkbuf, 0, fields); + rkbuf, rd_false /*don't use topic_id*/, + rd_true, 0, fields); rd_kafka_buf_destroy(rkbuf); if (!partitions) rd_kafka_buf_parse_fail( diff --git a/lib/librdkafka-2.3.0/src/rdkafka_admin.h b/lib/librdkafka-2.4.0/src/rdkafka_admin.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_admin.h rename to lib/librdkafka-2.4.0/src/rdkafka_admin.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_assignment.c b/lib/librdkafka-2.4.0/src/rdkafka_assignment.c similarity index 93% rename from lib/librdkafka-2.3.0/src/rdkafka_assignment.c rename to lib/librdkafka-2.4.0/src/rdkafka_assignment.c index 3b0d7e83d77..6d1f01913f9 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_assignment.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_assignment.c @@ -153,8 +153,30 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk, continue; } - if (err 
== RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT || - rktpar->err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) { + if (err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH || + rktpar->err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) { + rd_kafka_topic_partition_t *rktpar_copy; + + rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", + "Adding %s [%" PRId32 + "] back to pending " + "list because of stale member epoch", + rktpar->topic, rktpar->partition); + + rktpar_copy = rd_kafka_topic_partition_list_add_copy( + rk->rk_consumer.assignment.pending, rktpar); + /* Need to reset offset to STORED to query for + * the committed offset again. If the offset is + * kept INVALID then auto.offset.reset will be + * triggered. + * + * Not necessary if err is UNSTABLE_OFFSET_COMMIT + * because the buffer is retried there. */ + rktpar_copy->offset = RD_KAFKA_OFFSET_STORED; + + } else if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT || + rktpar->err == + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) { /* Ongoing transactions are blocking offset retrieval. * This is typically retried from the OffsetFetch * handler but we can come here if the assignment @@ -210,7 +232,9 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk, /* Do nothing for request-level errors (err is set). */ } - if (offsets->cnt > 0) + /* In case of stale member epoch we retry to serve the + * assignment only after a successful ConsumerGroupHeartbeat. */ + if (offsets->cnt > 0 && err != RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) rd_kafka_assignment_serve(rk); } @@ -274,18 +298,32 @@ static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk, return; } - - if (err) { - rd_kafka_dbg(rk, CGRP, "OFFSET", - "Offset fetch error for %d partition(s): %s", - offsets->cnt, rd_kafka_err2str(err)); - rd_kafka_consumer_err( - rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL, - NULL, RD_KAFKA_OFFSET_INVALID, - "Failed to fetch committed offsets for " - "%d partition(s) in group \"%s\": %s", - offsets->cnt, rk->rk_group_id->str, rd_kafka_err2str(err)); + switch (err) { + case RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH: + rk->rk_cgrp->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING; + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rk->rk_cgrp, + "OffsetFetch error: Stale member epoch"); + break; + case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rk->rk_cgrp, "OffsetFetch error: Unknown member"); + break; + default: + rd_kafka_dbg( + rk, CGRP, "OFFSET", + "Offset fetch error for %d partition(s): %s", + offsets->cnt, rd_kafka_err2str(err)); + rd_kafka_consumer_err( + rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, + NULL, NULL, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed offsets for " + "%d partition(s) in group \"%s\": %s", + offsets->cnt, rk->rk_group_id->str, + rd_kafka_err2str(err)); + } } /* Apply the fetched offsets to the assignment */ @@ -543,7 +581,8 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) { partitions_to_query->cnt); rd_kafka_OffsetFetchRequest( - coord, rk->rk_group_id->str, partitions_to_query, + coord, rk->rk_group_id->str, partitions_to_query, rd_false, + -1, NULL, rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/, 0, /* Timeout */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_assignment.h b/lib/librdkafka-2.4.0/src/rdkafka_assignment.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_assignment.h rename to lib/librdkafka-2.4.0/src/rdkafka_assignment.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_assignor.c 
b/lib/librdkafka-2.4.0/src/rdkafka_assignor.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_assignor.c rename to lib/librdkafka-2.4.0/src/rdkafka_assignor.c index 607a7bfd5aa..465568c41da 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_assignor.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_assignor.c @@ -159,7 +159,8 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new( rd_kafka_buf_write_topic_partitions( rkbuf, owned_partitions, rd_false /*don't skip invalid offsets*/, - rd_false /*any offset*/, fields); + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); } /* Following data is ignored by consumer version < 2 */ @@ -1278,7 +1279,7 @@ int verifyValidityAndBalance0(const char *func, * it means the assignment strategy failed to * properly balance the partitions. */ if (!balanced && - rd_kafka_topic_partition_list_find_topic( + rd_kafka_topic_partition_list_find_topic_by_name( otherPartitions, partition->topic)) { RD_UT_WARN( "Some %s partition(s) can be " diff --git a/lib/librdkafka-2.3.0/src/rdkafka_assignor.h b/lib/librdkafka-2.4.0/src/rdkafka_assignor.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_assignor.h rename to lib/librdkafka-2.4.0/src/rdkafka_assignor.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_aux.c b/lib/librdkafka-2.4.0/src/rdkafka_aux.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_aux.c rename to lib/librdkafka-2.4.0/src/rdkafka_aux.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_aux.h b/lib/librdkafka-2.4.0/src/rdkafka_aux.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_aux.h rename to lib/librdkafka-2.4.0/src/rdkafka_aux.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_background.c b/lib/librdkafka-2.4.0/src/rdkafka_background.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_background.c rename to lib/librdkafka-2.4.0/src/rdkafka_background.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_broker.c b/lib/librdkafka-2.4.0/src/rdkafka_broker.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_broker.c rename to lib/librdkafka-2.4.0/src/rdkafka_broker.c index e92f008bfc2..685cf5bfc61 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_broker.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_broker.c @@ -2921,9 +2921,10 @@ static void rd_kafka_broker_retry_bufs_move(rd_kafka_broker_t *rkb, * To avoid extra iterations, the \p err and \p status are set on * the message as they are popped off the OP_DR msgq in rd_kafka_poll() et.al */ -void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t err) { +void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t err, + const rd_kafka_Produce_result_t *presult) { rd_kafka_t *rk = rkt->rkt_rk; if (unlikely(rd_kafka_msgq_len(rkmq) == 0)) @@ -2934,7 +2935,11 @@ void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt, rd_kafka_msgq_len(rkmq)); /* Call on_acknowledgement() interceptors */ - rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq, err); + rd_kafka_interceptors_on_acknowledgement_queue( + rk, rkmq, + (presult && presult->record_errors_cnt > 1) + ? 
RD_KAFKA_RESP_ERR_NO_ERROR + : err); if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE && (!rk->rk_conf.dr_err_only || err)) { @@ -2944,6 +2949,9 @@ void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt, rko = rd_kafka_op_new(RD_KAFKA_OP_DR); rko->rko_err = err; rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt); + if (presult) + rko->rko_u.dr.presult = + rd_kafka_Produce_result_copy(presult); rd_kafka_msgq_init(&rko->rko_u.dr.msgq); /* Move all messages to op's msgq */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_broker.h b/lib/librdkafka-2.4.0/src/rdkafka_broker.h similarity index 98% rename from lib/librdkafka-2.3.0/src/rdkafka_broker.h rename to lib/librdkafka-2.4.0/src/rdkafka_broker.h index 30f66b25c9d..41bc3d3eaf2 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_broker.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_broker.h @@ -517,9 +517,13 @@ void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr); int rd_kafka_send(rd_kafka_broker_t *rkb); int rd_kafka_recv(rd_kafka_broker_t *rkb); -void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t err); +#define rd_kafka_dr_msgq(rkt, rkmq, err) \ + rd_kafka_dr_msgq0(rkt, rkmq, err, NULL /*no produce result*/) + +void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t err, + const rd_kafka_Produce_result_t *presult); void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, diff --git a/lib/librdkafka-2.3.0/src/rdkafka_buf.c b/lib/librdkafka-2.4.0/src/rdkafka_buf.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_buf.c rename to lib/librdkafka-2.4.0/src/rdkafka_buf.c index 362f57a27d2..292c21819ca 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_buf.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_buf.c @@ -38,6 +38,8 @@ void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) { case RD_KAFKAP_Metadata: if (rkbuf->rkbuf_u.Metadata.topics) rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics); + if (rkbuf->rkbuf_u.Metadata.topic_ids) + rd_list_destroy(rkbuf->rkbuf_u.Metadata.topic_ids); if (rkbuf->rkbuf_u.Metadata.reason) rd_free(rkbuf->rkbuf_u.Metadata.reason); if (rkbuf->rkbuf_u.Metadata.rko) diff --git a/lib/librdkafka-2.3.0/src/rdkafka_buf.h b/lib/librdkafka-2.4.0/src/rdkafka_buf.h similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_buf.h rename to lib/librdkafka-2.4.0/src/rdkafka_buf.h index 099f705018a..5993d704fde 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_buf.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_buf.h @@ -375,8 +375,10 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ union { struct { - rd_list_t *topics; /* Requested topics (char *) */ - char *reason; /* Textual reason */ + rd_list_t *topics; /* Requested topics (char *) */ + rd_list_t * + topic_ids; /* Requested topic ids rd_kafka_Uuid_t */ + char *reason; /* Textual reason */ rd_kafka_op_t *rko; /* Originating rko with replyq * (if any) */ rd_bool_t all_topics; /**< Full/All topics requested */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_cert.c b/lib/librdkafka-2.4.0/src/rdkafka_cert.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_cert.c rename to lib/librdkafka-2.4.0/src/rdkafka_cert.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_cert.h b/lib/librdkafka-2.4.0/src/rdkafka_cert.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_cert.h rename to lib/librdkafka-2.4.0/src/rdkafka_cert.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_cgrp.c b/lib/librdkafka-2.4.0/src/rdkafka_cgrp.c similarity index 80% rename from 
lib/librdkafka-2.3.0/src/rdkafka_cgrp.c rename to lib/librdkafka-2.4.0/src/rdkafka_cgrp.c index eb953bb56b2..1917991ddd9 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_cgrp.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_cgrp.c @@ -98,6 +98,7 @@ static void rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg, rd_kafka_topic_partition_list_t *assignment); +static void rd_kafka_cgrp_consumer_assignment_done(rd_kafka_cgrp_t *rkcg); /** * @returns true if the current assignment is lost. @@ -171,6 +172,16 @@ rd_kafka_cgrp_assignment_clear_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) { */ rd_kafka_rebalance_protocol_t rd_kafka_cgrp_rebalance_protocol(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + if (!(rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE)) + return RD_KAFKA_REBALANCE_PROTOCOL_NONE; + + return rkcg->rkcg_rk->rk_conf.partition_assignors_cooperative + ? RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE + : RD_KAFKA_REBALANCE_PROTOCOL_EAGER; + } + if (!rkcg->rkcg_assignor) return RD_KAFKA_REBALANCE_PROTOCOL_NONE; return rkcg->rkcg_assignor->rkas_protocol; @@ -216,7 +227,12 @@ static void rd_kafka_cgrp_clear_wait_resp(rd_kafka_cgrp_t *rkcg, rkcg->rkcg_wait_resp = -1; } - +/** + * @brief No-op, just serves for awaking the main loop when needed. + * TODO: complete the refactor and serve directly from here. + */ +static void rd_kafka_cgrp_serve_timer_cb(rd_kafka_timers_t *rkts, void *arg) { +} /** * @struct Auxillary glue type used for COOPERATIVE rebalance set operations. @@ -359,9 +375,17 @@ void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg) { rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_subscription); rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_group_leader.members); rd_kafka_cgrp_set_member_id(rkcg, NULL); + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_current_assignment); + RD_IF_FREE(rkcg->rkcg_target_assignment, + rd_kafka_topic_partition_list_destroy); + RD_IF_FREE(rkcg->rkcg_next_target_assignment, + rd_kafka_topic_partition_list_destroy); if (rkcg->rkcg_group_instance_id) rd_kafkap_str_destroy(rkcg->rkcg_group_instance_id); - + if (rkcg->rkcg_group_remote_assignor) + rd_kafkap_str_destroy(rkcg->rkcg_group_remote_assignor); + if (rkcg->rkcg_client_rack) + rd_kafkap_str_destroy(rkcg->rkcg_client_rack); rd_kafka_q_destroy_owner(rkcg->rkcg_q); rd_kafka_q_destroy_owner(rkcg->rkcg_ops); rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q); @@ -398,18 +422,19 @@ rd_kafka_cgrp_update_session_timeout(rd_kafka_cgrp_t *rkcg, rd_bool_t reset) { rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, + rd_kafka_group_protocol_t group_protocol, const rd_kafkap_str_t *group_id, const rd_kafkap_str_t *client_id) { rd_kafka_cgrp_t *rkcg; - rkcg = rd_calloc(1, sizeof(*rkcg)); - rkcg->rkcg_rk = rk; - rkcg->rkcg_group_id = group_id; - rkcg->rkcg_client_id = client_id; - rkcg->rkcg_coord_id = -1; - rkcg->rkcg_generation_id = -1; - rkcg->rkcg_wait_resp = -1; + rkcg->rkcg_rk = rk; + rkcg->rkcg_group_protocol = group_protocol; + rkcg->rkcg_group_id = group_id; + rkcg->rkcg_client_id = client_id; + rkcg->rkcg_coord_id = -1; + rkcg->rkcg_generation_id = -1; + rkcg->rkcg_wait_resp = -1; rkcg->rkcg_ops = rd_kafka_q_new(rk); rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve; @@ -420,7 +445,14 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, rkcg->rkcg_q = rd_kafka_consume_q_new(rk); rkcg->rkcg_group_instance_id = rd_kafkap_str_new(rk->rk_conf.group_instance_id, -1); - + rkcg->rkcg_group_remote_assignor = + 
rd_kafkap_str_new(rk->rk_conf.group_remote_assignor, -1); + if (!RD_KAFKAP_STR_LEN(rkcg->rkcg_rk->rk_conf.client_rack)) + rkcg->rkcg_client_rack = rd_kafkap_str_new(NULL, -1); + else + rkcg->rkcg_client_rack = + rd_kafkap_str_copy(rkcg->rkcg_rk->rk_conf.client_rack); + rkcg->rkcg_next_subscription = NULL; TAILQ_INIT(&rkcg->rkcg_topics); rd_list_init(&rkcg->rkcg_toppars, 32, NULL); rd_kafka_cgrp_set_member_id(rkcg, ""); @@ -432,6 +464,9 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, rd_interval_init(&rkcg->rkcg_timeout_scan_intvl); rd_atomic32_init(&rkcg->rkcg_assignment_lost, rd_false); rd_atomic32_init(&rkcg->rkcg_terminated, rd_false); + rkcg->rkcg_current_assignment = rd_kafka_topic_partition_list_new(0); + rkcg->rkcg_target_assignment = NULL; + rkcg->rkcg_next_target_assignment = NULL; rkcg->rkcg_errored_topics = rd_kafka_topic_partition_list_new(0); @@ -451,6 +486,13 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, rk->rk_conf.auto_commit_interval_ms * 1000ll, rd_kafka_cgrp_offset_commit_tmr_cb, rkcg); + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_log( + rk, LOG_WARNING, "CGRP", + "KIP-848 Consumer Group Protocol is in Early Access " + "and MUST NOT be used in production"); + } + return rkcg; } @@ -852,6 +894,120 @@ static void rd_kafka_cgrp_handle_LeaveGroup(rd_kafka_t *rk, goto err; } +static void rd_kafka_cgrp_consumer_reset(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_group_protocol != RD_KAFKA_GROUP_PROTOCOL_CONSUMER) + return; + + rkcg->rkcg_generation_id = 0; + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_current_assignment); + RD_IF_FREE(rkcg->rkcg_target_assignment, + rd_kafka_topic_partition_list_destroy); + rkcg->rkcg_target_assignment = NULL; + RD_IF_FREE(rkcg->rkcg_next_target_assignment, + rd_kafka_topic_partition_list_destroy); + rkcg->rkcg_next_target_assignment = NULL; + rkcg->rkcg_current_assignment = rd_kafka_topic_partition_list_new(0); + + /* Leave only specified flags, reset the rest */ + rkcg->rkcg_consumer_flags = + (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE) | + (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE); +} + +/** + * @brief cgrp handling of ConsumerGroupHeartbeat response after leaving group + * @param opaque must be the cgrp handle. 
+ * @locality rdkafka main thread (unless err==ERR__DESTROY) + */ +static void +rd_kafka_cgrp_handle_ConsumerGroupHeartbeat_leave(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode = 0; + + if (err) { + ErrorCode = err; + goto err; + } + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + +err: + if (ErrorCode) + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "LEAVEGROUP", + "ConsumerGroupHeartbeat response error in state %s: %s", + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_err2str(ErrorCode)); + else + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "LEAVEGROUP", + "ConsumerGroupHeartbeat response received in state %s", + rd_kafka_cgrp_state_names[rkcg->rkcg_state]); + + rd_kafka_cgrp_consumer_reset(rkcg); + + if (ErrorCode != RD_KAFKA_RESP_ERR__DESTROY) { + rd_assert(thrd_is_current(rk->rk_thread)); + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE; + rd_kafka_cgrp_try_terminate(rkcg); + } + + return; + +err_parse: + ErrorCode = rkbuf->rkbuf_err; + goto err; +} + +static void rd_kafka_cgrp_consumer_leave(rd_kafka_cgrp_t *rkcg) { + int32_t member_epoch = -1; + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE", + "Group \"%.*s\": leave (in state %s): " + "ConsumerGroupHeartbeat already in-transit", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_state_names[rkcg->rkcg_state]); + return; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE", + "Group \"%.*s\": leave (in state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_state_names[rkcg->rkcg_state]); + + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WAIT_LEAVE; + if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg)) { + member_epoch = -2; + } + + if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) { + rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "LEAVE", + "Leaving group"); + rd_kafka_ConsumerGroupHeartbeatRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id, + member_epoch, rkcg->rkcg_group_instance_id, + NULL /* no rack */, -1 /* no rebalance_timeout_ms */, + NULL /* no subscription */, NULL /* no remote assignor */, + NULL /* no current assignment */, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_ConsumerGroupHeartbeat_leave, rkcg); + } else { + rd_kafka_cgrp_handle_ConsumerGroupHeartbeat_leave( + rkcg->rkcg_rk, rkcg->rkcg_coord, + RD_KAFKA_RESP_ERR__WAIT_COORD, NULL, NULL, rkcg); + } +} static void rd_kafka_cgrp_leave(rd_kafka_cgrp_t *rkcg) { char *member_id; @@ -905,22 +1061,25 @@ static rd_bool_t rd_kafka_cgrp_leave_maybe(rd_kafka_cgrp_t *rkcg) { rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE; - /* Don't send Leave when termating with NO_CONSUMER_CLOSE flag */ + /* Don't send Leave when terminating with NO_CONSUMER_CLOSE flag */ if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) return rd_false; - /* KIP-345: Static group members must not send a LeaveGroupRequest - * on termination. */ - if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) && - rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) - return rd_false; + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_leave(rkcg); + } else { + /* KIP-345: Static group members must not send a + * LeaveGroupRequest on termination. 
*/ + if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) && + rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) + return rd_false; - rd_kafka_cgrp_leave(rkcg); + rd_kafka_cgrp_leave(rkcg); + } return rd_true; } - /** * @brief Enqueues a rebalance op, delegating responsibility of calling * incremental_assign / incremental_unassign to the application. @@ -1215,7 +1374,9 @@ static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) { rd_kafka_cgrp_leave_maybe(rkcg); } + rd_kafka_cgrp_consumer_reset(rkcg); rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT); + rd_kafka_cgrp_consumer_expedite_next_heartbeat(rkcg, "rejoining"); } @@ -1513,8 +1674,8 @@ static void rd_kafka_cgrp_handle_SyncGroup_memberstate( const rd_kafka_topic_partition_field_t fields[] = { RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - if (!(assignment = - rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields))) + if (!(assignment = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields))) goto err_parse; rd_kafka_buf_read_kbytes(rkbuf, &UserData); @@ -1814,8 +1975,8 @@ static int rd_kafka_group_MemberMetadata_consumer_read( RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; if (Version >= 1 && - !(rkgm->rkgm_owned = - rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields))) + !(rkgm->rkgm_owned = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields))) goto err; if (Version >= 2) { @@ -2048,7 +2209,7 @@ static void rd_kafka_cgrp_handle_JoinGroup(rd_kafka_t *rk, rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL); rd_kafka_MetadataRequest( - rkb, &topics, "partition assignor", + rkb, &topics, NULL, "partition assignor", rd_false /*!allow_auto_create*/, /* cgrp_update=false: * Since the subscription list may not be identical @@ -2446,80 +2607,612 @@ static rd_bool_t rd_kafka_cgrp_update_subscribed_topics(rd_kafka_cgrp_t *rkcg, return rd_true; } +/** + * Compares a new target assignment with + * existing consumer group assignment. + * + * Returns that they're the same assignment + * in two cases: + * + * 1) If target assignment is present and the + * new assignment is same as target assignment, + * then we are already in process of adding that + * target assignment. + * 2) If target assignment is not present and + * the new assignment is same as current assignment, + * then we are already at correct assignment. + * + * @param new_target_assignment New target assignment + * + * @return Is the new assignment different from what's being handled by + * group \p cgrp ? + **/ +static rd_bool_t rd_kafka_cgrp_consumer_is_new_assignment_different( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *new_target_assignment) { + int is_assignment_different; + if (rkcg->rkcg_target_assignment) { + is_assignment_different = rd_kafka_topic_partition_list_cmp( + new_target_assignment, rkcg->rkcg_target_assignment, + rd_kafka_topic_partition_by_id_cmp); + } else { + is_assignment_different = rd_kafka_topic_partition_list_cmp( + new_target_assignment, rkcg->rkcg_current_assignment, + rd_kafka_topic_partition_by_id_cmp); + } + return is_assignment_different ? 
rd_true : rd_false; +} + +static rd_kafka_op_res_t rd_kafka_cgrp_consumer_handle_next_assignment( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *new_target_assignment, + rd_bool_t clear_next_assignment) { + rd_bool_t is_assignment_different = rd_false; + rd_bool_t has_next_target_assignment_to_clear = + rkcg->rkcg_next_target_assignment && clear_next_assignment; + if (rkcg->rkcg_consumer_flags & RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Reconciliation in progress, " + "postponing next one"); + return RD_KAFKA_OP_RES_HANDLED; + } + + is_assignment_different = + rd_kafka_cgrp_consumer_is_new_assignment_different( + rkcg, new_target_assignment); + + /* Starts reconcilation only when the group is in state + * INIT or state STEADY, keeps it as next target assignment + * otherwise. */ + if (!is_assignment_different) { + if (has_next_target_assignment_to_clear) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_next_target_assignment); + rkcg->rkcg_next_target_assignment = NULL; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Not reconciling new assignment: " + "Assignment is the same. " + "Next assignment %s", + (has_next_target_assignment_to_clear + ? "cleared" + : "not cleared")); + + } else if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT || + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY) { + rkcg->rkcg_consumer_flags |= RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK; + if (rkcg->rkcg_target_assignment) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_target_assignment); + } + rkcg->rkcg_target_assignment = + rd_kafka_topic_partition_list_copy(new_target_assignment); + + if (has_next_target_assignment_to_clear) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_next_target_assignment); + rkcg->rkcg_next_target_assignment = NULL; + } + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_target_assignment_str[512] = "NULL"; + + rd_kafka_topic_partition_list_str( + rkcg->rkcg_target_assignment, + rkcg_target_assignment_str, + sizeof(rkcg_target_assignment_str), 0); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Reconciliation starts with new target " + "assignment \"%s\". " + "Next assignment %s", + rkcg_target_assignment_str, + (has_next_target_assignment_to_clear + ? 
"cleared" + : "not cleared")); + } + rd_kafka_cgrp_handle_assignment(rkcg, + rkcg->rkcg_target_assignment); + } + + return RD_KAFKA_OP_RES_HANDLED; +} + +static rd_kafka_topic_partition_list_t * +rd_kafka_cgrp_consumer_assignment_with_metadata( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment, + rd_list_t **missing_topic_ids) { + int i; + rd_kafka_t *rk = rkcg->rkcg_rk; + rd_kafka_topic_partition_list_t *assignment_with_metadata = + rd_kafka_topic_partition_list_new(assignment->cnt); + for (i = 0; i < assignment->cnt; i++) { + struct rd_kafka_metadata_cache_entry *rkmce; + rd_kafka_topic_partition_t *rktpar; + char *topic_name = NULL; + rd_kafka_Uuid_t request_topic_id = + rd_kafka_topic_partition_get_topic_id( + &assignment->elems[i]); + + rd_kafka_rdlock(rk); + rkmce = + rd_kafka_metadata_cache_find_by_id(rk, request_topic_id, 1); + + if (rkmce) + topic_name = rd_strdup(rkmce->rkmce_mtopic.topic); + rd_kafka_rdunlock(rk); + + if (unlikely(!topic_name)) { + rktpar = rd_kafka_topic_partition_list_find_topic_by_id( + rkcg->rkcg_current_assignment, request_topic_id); + if (rktpar) + topic_name = rd_strdup(rktpar->topic); + } + + if (likely(topic_name != NULL)) { + rd_kafka_topic_partition_list_add_with_topic_name_and_id( + assignment_with_metadata, request_topic_id, + topic_name, assignment->elems[i].partition); + rd_free(topic_name); + continue; + } + + if (missing_topic_ids) { + if (unlikely(!*missing_topic_ids)) + *missing_topic_ids = + rd_list_new(1, rd_list_Uuid_destroy); + rd_list_add(*missing_topic_ids, + rd_kafka_Uuid_copy(&request_topic_id)); + } + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Metadata not found for the " + "assigned topic id: %s." + " Continuing without it", + rd_kafka_Uuid_base64str(&request_topic_id)); + } + if (missing_topic_ids && *missing_topic_ids) + rd_list_deduplicate(missing_topic_ids, + (void *)rd_kafka_Uuid_ptr_cmp); + return assignment_with_metadata; +} + +/** + * @brief Op callback from handle_JoinGroup + */ +static rd_kafka_op_res_t +rd_kafka_cgrp_consumer_handle_Metadata_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + rd_kafka_op_res_t assignment_handle_ret; + rd_kafka_topic_partition_list_t *assignment_with_metadata; + rd_bool_t all_partition_metadata_available; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; /* Terminating */ + + if (!rkcg->rkcg_next_target_assignment) + return RD_KAFKA_OP_RES_HANDLED; + + assignment_with_metadata = + rd_kafka_cgrp_consumer_assignment_with_metadata( + rkcg, rkcg->rkcg_next_target_assignment, NULL); + + all_partition_metadata_available = + assignment_with_metadata->cnt == + rkcg->rkcg_next_target_assignment->cnt + ? 
rd_true + : rd_false; + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char assignment_with_metadata_str[512] = "NULL"; + + rd_kafka_topic_partition_list_str( + assignment_with_metadata, assignment_with_metadata_str, + sizeof(assignment_with_metadata_str), 0); + + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Metadata available for %d/%d of next target assignment, " + " which is: \"%s\"", + assignment_with_metadata->cnt, + rkcg->rkcg_next_target_assignment->cnt, + assignment_with_metadata_str); + } + + assignment_handle_ret = rd_kafka_cgrp_consumer_handle_next_assignment( + rkcg, assignment_with_metadata, all_partition_metadata_available); + rd_kafka_topic_partition_list_destroy(assignment_with_metadata); + return assignment_handle_ret; +} + +void rd_kafka_cgrp_consumer_next_target_assignment_request_metadata( + rd_kafka_t *rk, + rd_kafka_broker_t *rkb) { + rd_kafka_topic_partition_list_t *assignment_with_metadata; + rd_kafka_op_t *rko; + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + rd_list_t *missing_topic_ids = NULL; + + if (!rkcg->rkcg_next_target_assignment->cnt) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "No metadata to request, continuing"); + rd_kafka_topic_partition_list_t *new_target_assignment = + rd_kafka_topic_partition_list_new(0); + rd_kafka_cgrp_consumer_handle_next_assignment( + rkcg, new_target_assignment, rd_true); + rd_kafka_topic_partition_list_destroy(new_target_assignment); + return; + } + + + assignment_with_metadata = + rd_kafka_cgrp_consumer_assignment_with_metadata( + rkcg, rkcg->rkcg_next_target_assignment, &missing_topic_ids); + + if (!missing_topic_ids) { + /* Metadata is already available for all the topics. */ + rd_kafka_cgrp_consumer_handle_next_assignment( + rkcg, assignment_with_metadata, rd_true); + rd_kafka_topic_partition_list_destroy(assignment_with_metadata); + return; + } + rd_kafka_topic_partition_list_destroy(assignment_with_metadata); + + /* Request missing metadata. */ + rko = rd_kafka_op_new_cb(rkcg->rkcg_rk, RD_KAFKA_OP_METADATA, + rd_kafka_cgrp_consumer_handle_Metadata_op); + rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL); + rd_kafka_MetadataRequest( + rkb, NULL, missing_topic_ids, "ConsumerGroupHeartbeat API Response", + rd_false /*!allow_auto_create*/, rd_false, rd_false, rko); + rd_list_destroy(missing_topic_ids); +} /** * @brief Handle Heartbeat response. 
*/ -void rd_kafka_cgrp_handle_Heartbeat(rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +void rd_kafka_cgrp_handle_ConsumerGroupHeartbeat(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t error_code = 0; int actions = 0; + rd_kafkap_str_t error_str; + rd_kafkap_str_t member_id; + int32_t member_epoch; + int32_t heartbeat_interval_ms; if (err == RD_KAFKA_RESP_ERR__DESTROY) return; rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT); - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; - - rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; if (err) goto err; - if (request->rkbuf_reqhdr.ApiVersion >= 1) - rd_kafka_buf_read_throttle_time(rkbuf); + rd_kafka_buf_read_throttle_time(rkbuf); - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); - if (ErrorCode) { - err = ErrorCode; + rd_kafka_buf_read_i16(rkbuf, &error_code); + rd_kafka_buf_read_str(rkbuf, &error_str); + + if (error_code) { + err = error_code; goto err; } - rd_kafka_cgrp_update_session_timeout( - rkcg, rd_false /*don't update if session has expired*/); + rd_kafka_buf_read_str(rkbuf, &member_id); + rd_kafka_buf_read_i32(rkbuf, &member_epoch); + rd_kafka_buf_read_i32(rkbuf, &heartbeat_interval_ms); + + int8_t are_assignments_present; + rd_kafka_buf_read_i8(rkbuf, &are_assignments_present); + if (!RD_KAFKAP_STR_IS_NULL(&member_id)) { + rd_kafka_cgrp_set_member_id(rkcg, member_id.str); + } + rkcg->rkcg_generation_id = member_epoch; + if (heartbeat_interval_ms > 0) { + rkcg->rkcg_heartbeat_intvl_ms = heartbeat_interval_ms; + } + + if (are_assignments_present == 1) { + rd_kafka_topic_partition_list_t *assigned_topic_partitions; + const rd_kafka_topic_partition_field_t assignments_fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + assigned_topic_partitions = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_true, rd_false /* Don't use Topic Name */, 0, + assignments_fields); + + if (rd_kafka_is_dbg(rk, CGRP)) { + char assigned_topic_partitions_str[512] = "NULL"; + + if (assigned_topic_partitions) { + rd_kafka_topic_partition_list_str( + assigned_topic_partitions, + assigned_topic_partitions_str, + sizeof(assigned_topic_partitions_str), 0); + } + + rd_kafka_dbg( + rk, CGRP, "HEARTBEAT", + "ConsumerGroupHeartbeat response received target " + "assignment \"%s\"", + assigned_topic_partitions_str); + } + + if (assigned_topic_partitions) { + RD_IF_FREE(rkcg->rkcg_next_target_assignment, + rd_kafka_topic_partition_list_destroy); + rkcg->rkcg_next_target_assignment = NULL; + if (rd_kafka_cgrp_consumer_is_new_assignment_different( + rkcg, assigned_topic_partitions)) { + rkcg->rkcg_next_target_assignment = + assigned_topic_partitions; + } else { + rd_kafka_topic_partition_list_destroy( + assigned_topic_partitions); + assigned_topic_partitions = NULL; + } + } + } + + if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY && + (rkcg->rkcg_consumer_flags & RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) && + rkcg->rkcg_target_assignment) { + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK) { + if (rkcg->rkcg_current_assignment) + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_current_assignment); + rkcg->rkcg_current_assignment = + rd_kafka_topic_partition_list_copy( + 
rkcg->rkcg_target_assignment); + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_target_assignment); + rkcg->rkcg_target_assignment = NULL; + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK; + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_current_assignment_str[512] = "NULL"; + + rd_kafka_topic_partition_list_str( + rkcg->rkcg_current_assignment, + rkcg_current_assignment_str, + sizeof(rkcg_current_assignment_str), 0); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Target assignment acked, new " + "current assignment " + " \"%s\"", + rkcg_current_assignment_str); + } + } else if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) { + /* We've finished reconciliation but we weren't + * sending an ack, need to send a new HB with the ack. + */ + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "not subscribed anymore"); + } + } + + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING && + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY) { + /* TODO: Check if this should be done only for the steady state? + */ + rd_kafka_assignment_serve(rk); + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING; + } + + if (rkcg->rkcg_next_target_assignment) { + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) { + rd_kafka_cgrp_consumer_next_target_assignment_request_metadata( + rk, rkb); + } else { + /* Consumer left the group sending an HB request + * while this one was in-flight. */ + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_next_target_assignment); + rkcg->rkcg_next_target_assignment = NULL; + } + } + + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION & + ~RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST & + ~RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK; + rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rkcg->rkcg_expedite_heartbeat_retries = 0; return; + err_parse: err = rkbuf->rkbuf_err; + err: rkcg->rkcg_last_heartbeat_err = err; - - rd_kafka_dbg( - rkcg->rkcg_rk, CGRP, "HEARTBEAT", - "Group \"%s\" heartbeat error response in " - "state %s (join-state %s, %d partition(s) assigned): %s", - rkcg->rkcg_group_id->str, - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, - rd_kafka_err2str(err)); - - if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { - rd_kafka_dbg( - rkcg->rkcg_rk, CGRP, "HEARTBEAT", - "Heartbeat response: discarding outdated " - "request (now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); - return; - } + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; switch (err) { case RD_KAFKA_RESP_ERR__DESTROY: /* quick cleanup */ return; + case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", + "ConsumerGroupHeartbeat failed due to coordinator (%s) " + "loading in progress: %s: " + "retrying", + rkcg->rkcg_curr_coord + ? 
rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "none", + rd_kafka_err2str(err)); + actions = RD_KAFKA_ERR_ACTION_RETRY; + break; + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: case RD_KAFKA_RESP_ERR__TRANSPORT: + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", + "ConsumerGroupHeartbeat failed due to coordinator (%s) " + "no longer available: %s: " + "re-querying for coordinator", + rkcg->rkcg_curr_coord + ? rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "none", + rd_kafka_err2str(err)); + /* Remain in joined state and keep querying for coordinator */ + actions = RD_KAFKA_ERR_ACTION_REFRESH; + break; + + case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: + case RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH: rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", - "Heartbeat failed due to coordinator (%s) " + "ConsumerGroupHeartbeat failed due to: %s: " + "will rejoin the group", + rd_kafka_err2str(err)); + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN; + return; + + case RD_KAFKA_RESP_ERR_INVALID_REQUEST: + case RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED: + case RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR: + case RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION: + case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE: + case RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID: + case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED: + actions = RD_KAFKA_ERR_ACTION_FATAL; + break; + default: + actions = rd_kafka_err_action(rkb, err, request, + RD_KAFKA_ERR_ACTION_END); + break; + } + + if (actions & RD_KAFKA_ERR_ACTION_FATAL) { + rd_kafka_set_fatal_error( + rkcg->rkcg_rk, err, + "ConsumerGroupHeartbeat fatal error: %s", + rd_kafka_err2str(err)); + rd_kafka_cgrp_revoke_all_rejoin_maybe( + rkcg, rd_true, /*assignments lost*/ + rd_true, /*initiating*/ + "Fatal error in ConsumerGroupHeartbeat API response"); + return; + } + + if (!rkcg->rkcg_heartbeat_intvl_ms) { + /* When an error happens on first HB, it should be always + * retried, unless fatal, to avoid entering a tight loop + * and to use exponential backoff. */ + actions |= RD_KAFKA_ERR_ACTION_RETRY; + } + + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { + /* Re-query for coordinator */ + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST; + rd_kafka_cgrp_coord_query(rkcg, rd_kafka_err2str(err)); + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "coordinator query"); + } + + if (actions & RD_KAFKA_ERR_ACTION_RETRY && + rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION && + rd_kafka_buf_retry(rkb, request)) { + /* Retry */ + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + } +} + + +/** + * @brief Handle Heartbeat response. 
+ */ +void rd_kafka_cgrp_handle_Heartbeat(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode = 0; + int actions = 0; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; + + rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT); + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + + rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (err) + goto err; + + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + if (ErrorCode) { + err = ErrorCode; + goto err; + } + + rd_kafka_cgrp_update_session_timeout( + rkcg, rd_false /*don't update if session has expired*/); + + return; + +err_parse: + err = rkbuf->rkbuf_err; +err: + rkcg->rkcg_last_heartbeat_err = err; + + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Group \"%s\" heartbeat error response in " + "state %s (join-state %s, %d partition(s) assigned): %s", + rkcg->rkcg_group_id->str, + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + rd_kafka_err2str(err)); + + if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Heartbeat response: discarding outdated " + "request (now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + return; + } + + switch (err) { + case RD_KAFKA_RESP_ERR__DESTROY: + /* quick cleanup */ + return; + + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: + case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR__TRANSPORT: + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", + "Heartbeat failed due to coordinator (%s) " "no longer available: %s: " "re-querying for coordinator", rkcg->rkcg_curr_coord @@ -2657,6 +3350,9 @@ static void rd_kafka_cgrp_terminated(rd_kafka_cgrp_t *rkcg) { /* Remove cgrp application queue forwarding, if any. */ rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL); + + /* Destroy KIP-848 consumer group structures */ + rd_kafka_cgrp_consumer_reset(rkcg); } @@ -2673,7 +3369,11 @@ static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg) { if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE))) return 0; - /* Check if wait-coord queue has timed out. */ + /* Check if wait-coord queue has timed out. + + FIXME: Remove usage of `group_session_timeout_ms` for the new + consumer group protocol implementation defined in KIP-848. + */ if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 && rkcg->rkcg_ts_terminate + (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) < @@ -2844,7 +3544,6 @@ static void rd_kafka_cgrp_partition_del(rd_kafka_cgrp_t *rkcg, static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko, const char *reason) { - /* wait_coord_q is disabled session.timeout.ms after * group close() has been initated. */ if (rko->rko_u.offset_commit.ts_timeout != 0 || @@ -2863,6 +3562,11 @@ static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg, : "none"); rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS; + + /* FIXME: Remove `group_session_timeout_ms` for the new protocol + * defined in KIP-848 as this property is deprecated from client + * side in the new protocol. 
+         */
        rko->rko_u.offset_commit.ts_timeout =
            rd_clock() +
            (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
@@ -2871,6 +3575,45 @@ static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg,
        return 1;
 }
 
+/**
+ * @brief Defer offset commit (rko) until coordinator is available (KIP-848).
+ *
+ * @returns 1 if the rko was deferred or 0 if the defer queue is disabled
+ *          or rko already deferred.
+ */
+static int rd_kafka_cgrp_consumer_defer_offset_commit(rd_kafka_cgrp_t *rkcg,
+                                                      rd_kafka_op_t *rko,
+                                                      const char *reason) {
+        /* wait_coord_q is disabled session.timeout.ms after
+         * group close() has been initiated. */
+        if ((rko->rko_u.offset_commit.ts_timeout != 0 &&
+             rd_clock() >= rko->rko_u.offset_commit.ts_timeout) ||
+            !rd_kafka_q_ready(rkcg->rkcg_wait_coord_q))
+                return 0;
+
+        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
+                     "Group \"%s\": "
+                     "unable to OffsetCommit in state %s: %s: "
+                     "retrying later",
+                     rkcg->rkcg_group_id->str,
+                     rd_kafka_cgrp_state_names[rkcg->rkcg_state], reason);
+
+        rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS;
+
+        if (!rko->rko_u.offset_commit.ts_timeout) {
+                rko->rko_u.offset_commit.ts_timeout =
+                    rd_clock() +
+                    (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
+        }
+
+        /* Reset partition level error before retrying */
+        rd_kafka_topic_partition_list_set_err(
+            rko->rko_u.offset_commit.partitions, RD_KAFKA_RESP_ERR_NO_ERROR);
+
+        rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko);
+
+        return 1;
+}
 
 /**
  * @brief Update the committed offsets for the partitions in \p offsets,
@@ -3069,18 +3812,23 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit(rd_kafka_t *rk,
                            rd_kafka_err2str(err));
        }
 
-
        /*
         * Error handling
         */
        switch (err) {
        case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
-               /* Revoke assignment and rebalance on unknown member */
-               rd_kafka_cgrp_set_member_id(rk->rk_cgrp, "");
-               rd_kafka_cgrp_revoke_all_rejoin_maybe(
-                   rkcg, rd_true /*assignment is lost*/,
-                   rd_true /*this consumer is initiating*/,
-                   "OffsetCommit error: Unknown member");
+               if (rkcg->rkcg_group_protocol ==
+                   RD_KAFKA_GROUP_PROTOCOL_CONSUMER) {
+                       rd_kafka_cgrp_consumer_expedite_next_heartbeat(
+                           rk->rk_cgrp, "OffsetCommit error: Unknown member");
+               } else {
+                       /* Revoke assignment and rebalance on unknown member */
+                       rd_kafka_cgrp_set_member_id(rk->rk_cgrp, "");
+                       rd_kafka_cgrp_revoke_all_rejoin_maybe(
+                           rkcg, rd_true /*assignment is lost*/,
+                           rd_true /*this consumer is initiating*/,
+                           "OffsetCommit error: Unknown member");
+               }
                break;
 
        case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
@@ -3095,6 +3843,21 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit(rd_kafka_t *rk,
        case RD_KAFKA_RESP_ERR__IN_PROGRESS:
                return; /* Retrying */
 
+       case RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH:
+               /* FIXME: Add logs.*/
+               rd_kafka_cgrp_consumer_expedite_next_heartbeat(
+                   rk->rk_cgrp, "OffsetCommit error: Stale member epoch");
+               if (!rd_strcmp(rko_orig->rko_u.offset_commit.reason, "manual"))
+                       /* Don't retry manual commits that fail with this
+                        * error.
+                        * TODO: do this in a faster and cleaner way
+                        * with a bool. */
+                       break;
+
+               if (rd_kafka_cgrp_consumer_defer_offset_commit(
+                       rkcg, rko_orig, rd_kafka_err2str(err)))
+                       return;
+               break;
+
        case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
        case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
        case RD_KAFKA_RESP_ERR__TRANSPORT:
@@ -3386,6 +4149,19 @@ rd_kafka_trigger_waiting_subscribe_maybe(rd_kafka_cgrp_t *rkcg) {
        return rd_false;
 }
 
+static void rd_kafka_cgrp_start_max_poll_interval_timer(rd_kafka_cgrp_t *rkcg) {
+        /* If using subscribe(), start a timer to enforce
+         * `max.poll.interval.ms`.
+ * Instead of restarting the timer on each ...poll() + * call, which would be costly (once per message), + * set up an intervalled timer that checks a timestamp + * (that is updated on ..poll()). + * The timer interval is 2 hz. */ + rd_kafka_timer_start( + &rkcg->rkcg_rk->rk_timers, &rkcg->rkcg_max_poll_interval_tmr, + 500 * 1000ll /* 500ms */, + rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg); +} /** * @brief Incrementally add to an existing partition assignment @@ -3408,20 +4184,8 @@ rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg, "incremental assign called"); rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_STEADY); - if (rkcg->rkcg_subscription) { - /* If using subscribe(), start a timer to enforce - * `max.poll.interval.ms`. - * Instead of restarting the timer on each ...poll() - * call, which would be costly (once per message), - * set up an intervalled timer that checks a timestamp - * (that is updated on ..poll()). - * The timer interval is 2 hz. */ - rd_kafka_timer_start( - &rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, - 500 * 1000ll /* 500ms */, - rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg); + rd_kafka_cgrp_start_max_poll_interval_timer(rkcg); } } @@ -3570,6 +4334,11 @@ static void rd_kafka_cgrp_unassign_done(rd_kafka_cgrp_t *rkcg) { * change in the rkcg. */ void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_assignment_done(rkcg); + return; + } + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE", "Group \"%s\": " "assignment operations done in join-state %s " @@ -3641,7 +4410,6 @@ static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg) { return NULL; } - /** * @brief Set new atomic partition assignment * May update \p assignment but will not hold on to it. @@ -3674,20 +4442,8 @@ rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg, rd_kafka_assignment_resume(rkcg->rkcg_rk, "assign called"); rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_STEADY); - if (rkcg->rkcg_subscription) { - /* If using subscribe(), start a timer to enforce - * `max.poll.interval.ms`. - * Instead of restarting the timer on each ...poll() - * call, which would be costly (once per message), - * set up an intervalled timer that checks a timestamp - * (that is updated on ..poll()). - * The timer interval is 2 hz. */ - rd_kafka_timer_start( - &rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, - 500 * 1000ll /* 500ms */, - rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg); + rd_kafka_cgrp_start_max_poll_interval_timer(rkcg); } } @@ -3734,7 +4490,7 @@ rd_kafka_toppar_member_info_map_to_list(map_toppar_member_info_t *map) { rd_kafka_topic_partition_list_new((int)RD_MAP_CNT(map)); RD_MAP_FOREACH_KEY(k, map) { - rd_kafka_topic_partition_list_add(list, k->topic, k->partition); + rd_kafka_topic_partition_list_add_copy(list, k); } return list; @@ -4245,25 +5001,36 @@ rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts, rd_kafka_timer_stop(rkts, &rkcg->rkcg_max_poll_interval_tmr, 1 /*lock*/); - /* Leave the group before calling rebalance since the standard leave - * will be triggered first after the rebalance callback has been served. - * But since the application is blocked still doing processing - * that leave will be further delayed. - * - * KIP-345: static group members should continue to respect - * `max.poll.interval.ms` but should not send a LeaveGroupRequest. 
- */ - if (!RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg)) - rd_kafka_cgrp_leave(rkcg); - - /* Timing out or leaving the group invalidates the member id, reset it - * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */ - rd_kafka_cgrp_set_member_id(rkcg, ""); + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_leave(rkcg); + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN; + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, + "max poll interval " + "exceeded"); + } else { + /* Leave the group before calling rebalance since the standard + * leave will be triggered first after the rebalance callback + * has been served. But since the application is blocked still + * doing processing that leave will be further delayed. + * + * KIP-345: static group members should continue to respect + * `max.poll.interval.ms` but should not send a + * LeaveGroupRequest. + */ + if (!RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg)) + rd_kafka_cgrp_leave(rkcg); + /* Timing out or leaving the group invalidates the member id, + * reset it now to avoid an ERR_UNKNOWN_MEMBER_ID on the next + * join. */ + rd_kafka_cgrp_set_member_id(rkcg, ""); - /* Trigger rebalance */ - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, - rd_true /*initiating*/, - "max.poll.interval.ms exceeded"); + /* Trigger rebalance */ + rd_kafka_cgrp_revoke_all_rejoin_maybe( + rkcg, rd_true /*lost*/, rd_true /*initiating*/, + "max.poll.interval.ms exceeded"); + } } @@ -4408,6 +5175,20 @@ rd_kafka_cgrp_calculate_subscribe_revoking_partitions( return revoking; } +static void +rd_kafka_cgrp_subscription_set(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { + rkcg->rkcg_subscription = rktparlist; + if (rkcg->rkcg_subscription) { + /* Insert all non-wildcard topics in cache immediately. + * Otherwise a manual full metadata request could + * not cache the hinted topic and return an + * UNKNOWN_TOPIC_OR_PART error to the user. See #4589. */ + rd_kafka_metadata_cache_hint_rktparlist( + rkcg->rkcg_rk, rkcg->rkcg_subscription, NULL, + 0 /*dont replace*/); + } +} /** * @brief Handle a new subscription that is modifying an existing subscription @@ -4440,7 +5221,7 @@ rd_kafka_cgrp_modify_subscription(rd_kafka_cgrp_t *rkcg, rkcg, unsubscribing_topics); rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription); - rkcg->rkcg_subscription = rktparlist; + rd_kafka_cgrp_subscription_set(rkcg, rktparlist); if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age, "modify subscription") == 1) { @@ -4549,10 +5330,11 @@ static rd_kafka_resp_err_t rd_kafka_cgrp_unsubscribe(rd_kafka_cgrp_t *rkcg, if (rkcg->rkcg_subscription) { rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription); - rkcg->rkcg_subscription = NULL; + rd_kafka_cgrp_subscription_set(rkcg, NULL); } - rd_kafka_cgrp_update_subscribed_topics(rkcg, NULL); + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CLASSIC) + rd_kafka_cgrp_update_subscribed_topics(rkcg, NULL); /* * Clean-up group leader duties, if any. @@ -4574,7 +5356,6 @@ static rd_kafka_resp_err_t rd_kafka_cgrp_unsubscribe(rd_kafka_cgrp_t *rkcg, return RD_KAFKA_RESP_ERR_NO_ERROR; } - /** * Set new atomic topic subscription. 
*/ @@ -4647,7 +5428,7 @@ rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg, if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0) rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; - rkcg->rkcg_subscription = rktparlist; + rd_kafka_cgrp_subscription_set(rkcg, rktparlist); rd_kafka_cgrp_join(rkcg); @@ -4831,8 +5612,21 @@ static void rd_kafka_cgrp_handle_assign_op(rd_kafka_cgrp_t *rkcg, rko->rko_u.assign.partitions); rko->rko_u.assign.partitions = NULL; } + + if (rkcg->rkcg_rebalance_incr_assignment) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_rebalance_incr_assignment); + rkcg->rkcg_rebalance_incr_assignment = NULL; + } + rko->rko_u.assign.method = RD_KAFKA_ASSIGN_METHOD_ASSIGN; + if (rkcg->rkcg_join_state == + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) { + rd_kafka_cgrp_set_join_state( + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); + } + } else if (rd_kafka_cgrp_rebalance_protocol(rkcg) == RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && !(rko->rko_u.assign.method == @@ -4898,309 +5692,538 @@ static void rd_kafka_cgrp_handle_assign_op(rd_kafka_cgrp_t *rkcg, rd_kafka_op_error_reply(rko, error); } - /** - * @brief Handle cgrp queue op. - * @locality rdkafka main thread - * @locks none + * @returns true if the session timeout has expired (due to no successful + * Heartbeats in session.timeout.ms) and triggers a rebalance. */ -static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; - rd_kafka_toppar_t *rktp; - rd_kafka_resp_err_t err; - const int silent_op = rko->rko_type == RD_KAFKA_OP_RECV_BUF; +static rd_bool_t rd_kafka_cgrp_session_timeout_check(rd_kafka_cgrp_t *rkcg, + rd_ts_t now) { + rd_ts_t delta; + char buf[256]; - rktp = rko->rko_rktp; + if (unlikely(!rkcg->rkcg_ts_session_timeout)) + return rd_true; /* Session has expired */ - if (rktp && !silent_op) - rd_kafka_dbg( - rkcg->rkcg_rk, CGRP, "CGRPOP", - "Group \"%.*s\" received op %s in state %s " - "(join-state %s) for %.*s [%" PRId32 "]", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_op2str(rko->rko_type), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], + delta = now - rkcg->rkcg_ts_session_timeout; + if (likely(delta < 0)) + return rd_false; + + delta += rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000; + + rd_snprintf(buf, sizeof(buf), + "Consumer group session timed out (in join-state %s) after " + "%" PRId64 + " ms without a successful response from the " + "group coordinator (broker %" PRId32 ", last error was %s)", rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - else if (!silent_op) - rd_kafka_dbg( - rkcg->rkcg_rk, CGRP, "CGRPOP", - "Group \"%.*s\" received op %s in state %s " - "(join-state %s)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_op2str(rko->rko_type), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + delta / 1000, rkcg->rkcg_coord_id, + rd_kafka_err2str(rkcg->rkcg_last_heartbeat_err)); - switch ((int)rko->rko_type) { - case RD_KAFKA_OP_NAME: - /* Return the currently assigned member id. */ - if (rkcg->rkcg_member_id) - rko->rko_u.name.str = - RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id); - rd_kafka_op_reply(rko, 0); - rko = NULL; - break; + rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; - case RD_KAFKA_OP_CG_METADATA: - /* Return the current consumer group metadata. 
*/ - rko->rko_u.cg_metadata = - rkcg->rkcg_member_id - ? rd_kafka_consumer_group_metadata_new_with_genid( - rkcg->rkcg_rk->rk_conf.group_id_str, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id->str, - rkcg->rkcg_rk->rk_conf.group_instance_id) - : NULL; - rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); - rko = NULL; - break; + rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "SESSTMOUT", + "%s: revoking assignment and rejoining group", buf); - case RD_KAFKA_OP_OFFSET_FETCH: - if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) { - rd_kafka_op_handle_OffsetFetch( - rkcg->rkcg_rk, NULL, RD_KAFKA_RESP_ERR__WAIT_COORD, - NULL, NULL, rko); - rko = NULL; /* rko freed by handler */ - break; - } + /* Prevent further rebalances */ + rkcg->rkcg_ts_session_timeout = 0; - rd_kafka_OffsetFetchRequest( - rkcg->rkcg_coord, rk->rk_group_id->str, - rko->rko_u.offset_fetch.partitions, - rko->rko_u.offset_fetch.require_stable_offsets, - 0, /* Timeout */ - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_op_handle_OffsetFetch, rko); - rko = NULL; /* rko now owned by request */ - break; + /* Timing out invalidates the member id, reset it + * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */ + rd_kafka_cgrp_set_member_id(rkcg, ""); - case RD_KAFKA_OP_PARTITION_JOIN: - rd_kafka_cgrp_partition_add(rkcg, rktp); + /* Revoke and rebalance */ + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, buf); - /* If terminating tell the partition to leave */ - if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) - rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_NO_REPLYQ); - break; + return rd_true; +} - case RD_KAFKA_OP_PARTITION_LEAVE: - rd_kafka_cgrp_partition_del(rkcg, rktp); - break; - case RD_KAFKA_OP_OFFSET_COMMIT: - /* Trigger offsets commit. */ - rd_kafka_cgrp_offsets_commit(rkcg, rko, - /* only set offsets - * if no partitions were - * specified. */ - rko->rko_u.offset_commit.partitions - ? 0 - : 1 /* set_offsets*/, - rko->rko_u.offset_commit.reason); - rko = NULL; /* rko now owned by request */ - break; +/** + * @brief Apply the next waiting subscribe/unsubscribe, if any. + */ +static void rd_kafka_cgrp_apply_next_subscribe(rd_kafka_cgrp_t *rkcg) { + rd_assert(rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT); - case RD_KAFKA_OP_COORD_QUERY: - rd_kafka_cgrp_coord_query( - rkcg, - rko->rko_err ? 
rd_kafka_err2str(rko->rko_err) : "from op"); - break; + if (rkcg->rkcg_next_subscription) { + rd_kafka_topic_partition_list_t *next_subscription = + rkcg->rkcg_next_subscription; + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", + "Group \"%s\": invoking waiting postponed " + "subscribe", + rkcg->rkcg_group_id->str); + rkcg->rkcg_next_subscription = NULL; + rd_kafka_cgrp_subscribe(rkcg, next_subscription); - case RD_KAFKA_OP_SUBSCRIBE: - rd_kafka_app_polled(rk); + } else if (rkcg->rkcg_next_unsubscribe) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", + "Group \"%s\": invoking waiting postponed " + "unsubscribe", + rkcg->rkcg_group_id->str); + rkcg->rkcg_next_unsubscribe = rd_false; + rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*Leave*/); + } +} - /* New atomic subscription (may be NULL) */ - err = - rd_kafka_cgrp_subscribe(rkcg, rko->rko_u.subscribe.topics); +/** + * Client group's join state handling + */ +static void rd_kafka_cgrp_join_state_serve(rd_kafka_cgrp_t *rkcg) { + rd_ts_t now = rd_clock(); - if (!err) /* now owned by rkcg */ - rko->rko_u.subscribe.topics = NULL; + if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk))) + return; - rd_kafka_op_reply(rko, err); - rko = NULL; + switch (rkcg->rkcg_join_state) { + case RD_KAFKA_CGRP_JOIN_STATE_INIT: + if (unlikely(rd_kafka_cgrp_awaiting_response(rkcg))) + break; + + /* If there is a next subscription, apply it. */ + rd_kafka_cgrp_apply_next_subscribe(rkcg); + + /* If we have a subscription start the join process. */ + if (!rkcg->rkcg_subscription) + break; + + if (rd_interval_immediate(&rkcg->rkcg_join_intvl, 1000 * 1000, + now) > 0) + rd_kafka_cgrp_join(rkcg); break; - case RD_KAFKA_OP_ASSIGN: - rd_kafka_cgrp_handle_assign_op(rkcg, rko); - rko = NULL; + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: + /* FIXME: I think we might have to send heartbeats in + * in WAIT_INCR_UNASSIGN, yes-no? */ + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: break; - case RD_KAFKA_OP_GET_SUBSCRIPTION: - if (rkcg->rkcg_next_subscription) - rko->rko_u.subscribe.topics = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_next_subscription); - else if (rkcg->rkcg_next_unsubscribe) - rko->rko_u.subscribe.topics = NULL; - else if (rkcg->rkcg_subscription) - rko->rko_u.subscribe.topics = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_subscription); - rd_kafka_op_reply(rko, 0); - rko = NULL; + case RD_KAFKA_CGRP_JOIN_STATE_STEADY: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL: + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION && + rd_interval( + &rkcg->rkcg_heartbeat_intvl, + rkcg->rkcg_rk->rk_conf.group_heartbeat_intvl_ms * 1000, + now) > 0) + rd_kafka_cgrp_heartbeat(rkcg); break; + } +} - case RD_KAFKA_OP_GET_ASSIGNMENT: - /* This is the consumer assignment, not the group assignment. 
*/ - rko->rko_u.assign.partitions = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_rk->rk_consumer.assignment.all); +void rd_kafka_cgrp_consumer_group_heartbeat(rd_kafka_cgrp_t *rkcg, + rd_bool_t full_request, + rd_bool_t send_ack) { - rd_kafka_op_reply(rko, 0); - rko = NULL; - break; + rd_kafkap_str_t *rkcg_group_instance_id = NULL; + rd_kafkap_str_t *rkcg_client_rack = NULL; + int max_poll_interval_ms = -1; + rd_kafka_topic_partition_list_t *rkcg_subscription = NULL; + rd_kafkap_str_t *rkcg_group_remote_assignor = NULL; + rd_kafka_topic_partition_list_t *rkcg_group_assignment = NULL; + int32_t member_epoch = rkcg->rkcg_generation_id; + if (member_epoch < 0) + member_epoch = 0; - case RD_KAFKA_OP_GET_REBALANCE_PROTOCOL: - rko->rko_u.rebalance_protocol.str = - rd_kafka_rebalance_protocol2str( - rd_kafka_cgrp_rebalance_protocol(rkcg)); - rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); - rko = NULL; - break; - case RD_KAFKA_OP_TERMINATE: - rd_kafka_cgrp_terminate0(rkcg, rko); - rko = NULL; /* terminate0() takes ownership */ - break; + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED; + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; - default: - rd_kafka_assert(rkcg->rkcg_rk, !*"unknown type"); - break; + if (full_request) { + rkcg_group_instance_id = rkcg->rkcg_group_instance_id; + rkcg_client_rack = rkcg->rkcg_client_rack; + max_poll_interval_ms = + rkcg->rkcg_rk->rk_conf.max_poll_interval_ms; + rkcg_subscription = rkcg->rkcg_subscription; + rkcg_group_remote_assignor = rkcg->rkcg_group_remote_assignor; } - if (rko) - rd_kafka_op_destroy(rko); + if (send_ack) { + rkcg_group_assignment = rkcg->rkcg_target_assignment; + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK; - return RD_KAFKA_OP_RES_HANDLED; + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_group_assignment_str[512] = "NULL"; + + if (rkcg_group_assignment) { + rd_kafka_topic_partition_list_str( + rkcg_group_assignment, + rkcg_group_assignment_str, + sizeof(rkcg_group_assignment_str), 0); + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Acknowledging target assignment \"%s\"", + rkcg_group_assignment_str); + } + } else if (full_request) { + rkcg_group_assignment = rkcg->rkcg_current_assignment; + } + + if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY && + (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION || + rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION)) { + rkcg->rkcg_consumer_flags = + (rkcg->rkcg_consumer_flags & + ~RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION) | + RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION; + rkcg_subscription = rkcg->rkcg_subscription; + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_new_subscription_str[512] = "NULL"; + + if (rkcg_subscription) { + rd_kafka_topic_partition_list_str( + rkcg_subscription, + rkcg_new_subscription_str, + sizeof(rkcg_new_subscription_str), 0); + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Sending new subscription \"%s\"", + rkcg_new_subscription_str); + } + } + + rkcg->rkcg_expedite_heartbeat_retries++; + rd_kafka_ConsumerGroupHeartbeatRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id, + member_epoch, rkcg_group_instance_id, rkcg_client_rack, + max_poll_interval_ms, rkcg_subscription, rkcg_group_remote_assignor, + rkcg_group_assignment, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_ConsumerGroupHeartbeat, NULL); } +static rd_bool_t 
+rd_kafka_cgrp_consumer_heartbeat_preconditions_met(rd_kafka_cgrp_t *rkcg) {
+        if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION))
+                return rd_false;
 
-/**
- * @returns true if the session timeout has expired (due to no successful
- *          Heartbeats in session.timeout.ms) and triggers a rebalance.
- */
-static rd_bool_t rd_kafka_cgrp_session_timeout_check(rd_kafka_cgrp_t *rkcg,
-                                                     rd_ts_t now) {
-        rd_ts_t delta;
-        char buf[256];
+        if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT)
+                return rd_false;
 
-        if (unlikely(!rkcg->rkcg_ts_session_timeout))
-                return rd_true; /* Session has expired */
+        if (rkcg->rkcg_consumer_flags &
+            RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE)
+                return rd_false;
 
-        delta = now - rkcg->rkcg_ts_session_timeout;
-        if (likely(delta < 0))
+        if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED &&
+            rd_kafka_max_poll_exceeded(rkcg->rkcg_rk))
                 return rd_false;
 
-        delta += rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000;
+        return rd_true;
+}
 
-        rd_snprintf(buf, sizeof(buf),
-                    "Consumer group session timed out (in join-state %s) after "
-                    "%" PRId64
-                    " ms without a successful response from the "
-                    "group coordinator (broker %" PRId32 ", last error was %s)",
-                    rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
-                    delta / 1000, rkcg->rkcg_coord_id,
-                    rd_kafka_err2str(rkcg->rkcg_last_heartbeat_err));
+void rd_kafka_cgrp_consumer_serve(rd_kafka_cgrp_t *rkcg) {
+        rd_bool_t full_request = rkcg->rkcg_consumer_flags &
+                                 RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST;
+        rd_bool_t send_ack = rd_false;
 
-        rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+        if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk)))
+                return;
 
-        rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "SESSTMOUT",
-                     "%s: revoking assignment and rejoining group", buf);
+        if (unlikely(rkcg->rkcg_consumer_flags &
+                     RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN)) {
+                if (RD_KAFKA_CGRP_REBALANCING(rkcg))
+                        return;
+                rkcg->rkcg_consumer_flags &=
+                    ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN;
+                rkcg->rkcg_consumer_flags |=
+                    RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE;
 
-        /* Prevent further rebalances */
-        rkcg->rkcg_ts_session_timeout = 0;
+                rd_kafka_dbg(
+                    rkcg->rkcg_rk, CGRP, "HEARTBEAT",
+                    "Revoking assignment as lost and rejoining in join state %s",
+                    rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
 
-        /* Timing out invalidates the member id, reset it
-         * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join.
*/ - rd_kafka_cgrp_set_member_id(rkcg, ""); + rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_true, rd_true, + "member fenced - rejoining"); + } - /* Revoke and rebalance */ - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, - rd_true /*initiating*/, buf); + switch (rkcg->rkcg_join_state) { + case RD_KAFKA_CGRP_JOIN_STATE_INIT: + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE; + full_request = rd_true; + break; + case RD_KAFKA_CGRP_JOIN_STATE_STEADY: + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) { + send_ack = rd_true; + } + break; + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: + break; + default: + rd_assert(!*"unexpected state"); + } - return rd_true; + if (rd_kafka_cgrp_consumer_heartbeat_preconditions_met(rkcg)) { + rd_ts_t next_heartbeat = + rd_interval(&rkcg->rkcg_heartbeat_intvl, + rkcg->rkcg_heartbeat_intvl_ms * 1000, 0); + if (next_heartbeat > 0) { + rd_kafka_cgrp_consumer_group_heartbeat( + rkcg, full_request, send_ack); + next_heartbeat = rkcg->rkcg_heartbeat_intvl_ms * 1000; + } else { + next_heartbeat = -1 * next_heartbeat; + } + if (likely(rkcg->rkcg_heartbeat_intvl_ms > 0)) { + if (rkcg->rkcg_serve_timer.rtmr_next > + (rd_clock() + next_heartbeat)) { + /* We stop the timer if it expires later + * than expected and restart it below. */ + rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers, + &rkcg->rkcg_serve_timer, 0); + } + + /* Scheduling a timer yields the main loop so + * 'restart' has to be set to false to avoid a tight + * loop. */ + rd_kafka_timer_start_oneshot( + &rkcg->rkcg_rk->rk_timers, &rkcg->rkcg_serve_timer, + rd_false /*don't restart*/, next_heartbeat, + rd_kafka_cgrp_serve_timer_cb, NULL); + } + } } +/** + * Set new atomic topic subscription (KIP-848). + * + * @locality rdkafka main thread + * @locks none + */ +static rd_kafka_resp_err_t +rd_kafka_cgrp_consumer_subscribe(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + "Group \"%.*s\": subscribe to new %ssubscription " + "of %d topics (join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rktparlist ? "" : "unset ", + rktparlist ? 
rktparlist->cnt : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + /* If the consumer has raised a fatal error treat all subscribes as + unsubscribe */ + if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { + if (rkcg->rkcg_subscription) + rd_kafka_cgrp_unsubscribe(rkcg, + rd_true /*leave group*/); + return RD_KAFKA_RESP_ERR__FATAL; + } + + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; + if (rktparlist) { + if (rkcg->rkcg_subscription) + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_subscription); + + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_SUBSCRIPTION; + + if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0) + rkcg->rkcg_flags |= + RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; + + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE | + RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION; + + rd_kafka_cgrp_subscription_set(rkcg, rktparlist); + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "subscription changed"); + } else { + rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*leave group*/); + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} /** - * @brief Apply the next waiting subscribe/unsubscribe, if any. + * @brief Call when all incremental unassign operations are done to transition + * to the next state. */ -static void rd_kafka_cgrp_apply_next_subscribe(rd_kafka_cgrp_t *rkcg) { - rd_assert(rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT); +static void rd_kafka_cgrp_consumer_incr_unassign_done(rd_kafka_cgrp_t *rkcg) { - if (rkcg->rkcg_next_subscription) { - rd_kafka_topic_partition_list_t *next_subscription = - rkcg->rkcg_next_subscription; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", - "Group \"%s\": invoking waiting postponed " - "subscribe", + /* If this action was underway when a terminate was initiated, it will + * be left to complete. Now that's done, unassign all partitions */ + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", + "Group \"%s\" is terminating, initiating full " + "unassign", rkcg->rkcg_group_id->str); - rkcg->rkcg_next_subscription = NULL; - rd_kafka_cgrp_subscribe(rkcg, next_subscription); + rd_kafka_cgrp_unassign(rkcg); + return; + } - } else if (rkcg->rkcg_next_unsubscribe) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", - "Group \"%s\": invoking waiting postponed " - "unsubscribe", - rkcg->rkcg_group_id->str); - rkcg->rkcg_next_unsubscribe = rd_false; - rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*Leave*/); + if (rkcg->rkcg_rebalance_incr_assignment) { + /* This incremental unassign was part of a normal rebalance + * (in which the revoke set was not empty). Immediately + * trigger the assign that follows this revoke. The protocol + * dictates this should occur even if the new assignment + * set is empty. + * + * Also, since this rebalance had some revoked partitions, + * a re-join should occur following the assign. 
+         */
+
+                rd_kafka_rebalance_op_incr(
+                    rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+                    rkcg->rkcg_rebalance_incr_assignment,
+                    rd_false /* don't rejoin following assign*/,
+                    "cooperative assign after revoke");
+
+                rd_kafka_topic_partition_list_destroy(
+                    rkcg->rkcg_rebalance_incr_assignment);
+                rkcg->rkcg_rebalance_incr_assignment = NULL;
+
+                /* Note: rkcg_rebalance_rejoin is actioned / reset in
+                 * rd_kafka_cgrp_incremental_assign call */
+
+        } else if (rkcg->rkcg_rebalance_rejoin) {
+                rkcg->rkcg_rebalance_rejoin = rd_false;
+
+                /* There are some cases (lost partitions), where a rejoin
+                 * should occur immediately following the unassign (this
+                 * is not the case under normal conditions), in which case
+                 * the rejoin flag will be set. */
+
+                rd_kafka_cgrp_rejoin(rkcg, "Incremental unassignment done");
+
+        } else {
+                /* After this incremental unassignment we're now back in
+                 * a steady state. */
+                rd_kafka_cgrp_set_join_state(rkcg,
+                                             RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+                if (rkcg->rkcg_subscription) {
+                        rd_kafka_cgrp_start_max_poll_interval_timer(rkcg);
+                }
+        }
+}
+
+/**
+ * @brief KIP-848: Called from assignment code when all in-progress
+ *        assignment/unassignment operations are done, allowing the cgrp to
+ *        transition to other states if needed.
+ *
+ * @param rkcg Consumer group.
+ *
+ * @remark This may be called spontaneously without any need for a state
+ *         change in the rkcg.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_cgrp_consumer_assignment_done(rd_kafka_cgrp_t *rkcg) {
+        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE",
+                     "Group \"%s\": "
+                     "assignment operations done in join-state %s "
+                     "(rebalance rejoin=%s)",
+                     rkcg->rkcg_group_id->str,
+                     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+                     RD_STR_ToF(rkcg->rkcg_rebalance_rejoin));
+
+        switch (rkcg->rkcg_join_state) {
+        case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE:
+                rd_kafka_cgrp_unassign_done(rkcg);
+                break;
+
+        case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE:
+                rd_kafka_cgrp_consumer_incr_unassign_done(rkcg);
+                break;
+
+        case RD_KAFKA_CGRP_JOIN_STATE_STEADY:
+                rd_kafka_cgrp_consumer_expedite_next_heartbeat(
+                    rkcg, "back to steady state");
+
+                if (rkcg->rkcg_rebalance_rejoin) {
+                        rkcg->rkcg_rebalance_rejoin = rd_false;
+                        rd_kafka_cgrp_rejoin(
+                            rkcg,
+                            "rejoining group to redistribute "
+                            "previously owned partitions to other "
+                            "group members");
+                        break;
+                }
+
+                /* FALLTHRU */
+
+        case RD_KAFKA_CGRP_JOIN_STATE_INIT: {
+                rd_bool_t still_in_group = rd_true;
+                /*
+                 * There may be a case where no assignments are assigned
+                 * to this consumer. While terminating, the consumer can
+                 * then be in STEADY or INIT state and won't go through an
+                 * intermediate state, so the last leave call is done from
+                 * here.
+                 */
+                still_in_group &= !rd_kafka_cgrp_leave_maybe(rkcg);
+
+                /* Check if cgrp is trying to terminate, which is safe to do
+                 * in these two states. Otherwise we'll need to wait for
+                 * the current state to decommission. */
+                still_in_group &= !rd_kafka_cgrp_try_terminate(rkcg);
+
+                if (still_in_group)
+                        rd_kafka_cgrp_consumer_expedite_next_heartbeat(
+                            rkcg, "back to init state");
+                break;
+        }
+        default:
+                break;
+        }
+}
+
+void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg,
+                                                    const char *reason) {
+        if (rkcg->rkcg_group_protocol != RD_KAFKA_GROUP_PROTOCOL_CONSUMER)
+                return;
+
+        rd_kafka_t *rk = rkcg->rkcg_rk;
+        /* Calculate the exponential backoff. */
+        int64_t backoff = 0;
+        if (rkcg->rkcg_expedite_heartbeat_retries)
+                backoff = 1 << (rkcg->rkcg_expedite_heartbeat_retries - 1);
+
+        /* Convert milliseconds to microseconds and apply the jitter
+         * percentage: (backoff_ms * percent * 1000) / 100 ==
+         * backoff_ms * percent * 10. */
+        backoff = rd_jitter(100 - RD_KAFKA_RETRY_JITTER_PERCENT,
+                            100 + RD_KAFKA_RETRY_JITTER_PERCENT) *
+                  backoff * 10;
+
+        /* Backoff is limited by retry_backoff_max_ms. */
+        if (backoff > rk->rk_conf.retry_backoff_max_ms * 1000)
+                backoff = rk->rk_conf.retry_backoff_max_ms * 1000;
+
+        /* Reset the interval as if it happened `rkcg_heartbeat_intvl_ms`
+         * milliseconds ago. */
+        rd_interval_reset_to_now(&rkcg->rkcg_heartbeat_intvl,
+                                 rd_clock() -
+                                     rkcg->rkcg_heartbeat_intvl_ms * 1000);
+        /* Set the exponential backoff. */
+        rd_interval_backoff(&rkcg->rkcg_heartbeat_intvl, backoff);
+
+        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT",
+                     "Expediting next heartbeat"
+                     ", with backoff %" PRId64 ": %s",
+                     backoff, reason);
+
+        /* Scheduling the timer also wakes up the main loop. */
+        rd_kafka_timer_start_oneshot(&rkcg->rkcg_rk->rk_timers,
+                                     &rkcg->rkcg_serve_timer, rd_true, backoff,
+                                     rd_kafka_cgrp_serve_timer_cb, NULL);
+}
+
 /**
  * Client group handling.
  * Called from main thread to serve the operational aspects of a cgrp.
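Note: the expedited-heartbeat backoff added above can be read as a small standalone
computation. The following sketch is illustrative only (not part of the patch): it
assumes RD_KAFKA_RETRY_JITTER_PERCENT is 20 as defined in rdkafka_int.h, and
my_jitter() is a simplified stand-in for rd_jitter().

    /* Illustrative sketch of the expedited-heartbeat backoff computation.
     * Assumption: RD_KAFKA_RETRY_JITTER_PERCENT == 20 (rdkafka_int.h);
     * my_jitter() is a hypothetical stand-in for rd_jitter(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RETRY_JITTER_PERCENT 20

    /* Uniform random integer in [low, high]. */
    static int my_jitter(int low, int high) {
            return low + rand() % (high - low + 1);
    }

    /* Returns the backoff in microseconds for the given retry count. */
    static int64_t expedite_backoff_us(int retries, int retry_backoff_max_ms) {
            int64_t backoff = 0;

            if (retries)
                    backoff = 1 << (retries - 1); /* 1, 2, 4, ... milliseconds */

            /* Apply +/-20% jitter and convert ms to us:
             * (ms * percent * 1000) / 100 == ms * percent * 10 */
            backoff = my_jitter(100 - RETRY_JITTER_PERCENT,
                                100 + RETRY_JITTER_PERCENT) *
                      backoff * 10;

            if (backoff > (int64_t)retry_backoff_max_ms * 1000)
                    backoff = (int64_t)retry_backoff_max_ms * 1000;

            return backoff;
    }

    int main(void) {
            int r;
            for (r = 1; r <= 5; r++) /* roughly 1ms, 2ms, 4ms, 8ms, 16ms */
                    printf("retry %d: %lld us\n", r,
                           (long long)expedite_backoff_us(r, 1000));
            return 0;
    }

So the first expedited heartbeat retry waits about 1 ms, doubling per retry until
capped by `retry.backoff.max.ms` (1000 ms in this example).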
@@ -5294,9 +6317,15 @@ void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg) { rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_UP); /* Serve join state to trigger (re)join */ - rd_kafka_cgrp_join_state_serve(rkcg); + if (rkcg->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_serve(rkcg); + } else { + rd_kafka_cgrp_join_state_serve(rkcg); + } - /* Serve any pending partitions in the assignment */ + /* Serve any pending partitions in the + * assignment */ rd_kafka_assignment_serve(rkcg->rkcg_rk); } break; @@ -5314,7 +6343,13 @@ void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg) { rd_kafka_cgrp_coord_query(rkcg, "intervaled in state up"); - rd_kafka_cgrp_join_state_serve(rkcg); + if (rkcg->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_serve(rkcg); + } else { + rd_kafka_cgrp_join_state_serve(rkcg); + } + break; } @@ -5348,7 +6383,192 @@ void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg, rd_kafka_q_enq(rkcg->rkcg_ops, rko); } +/** + * @brief Handle cgrp queue op. + * @locality rdkafka main thread + * @locks none + */ +static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; + rd_kafka_toppar_t *rktp; + rd_kafka_resp_err_t err; + const int silent_op = rko->rko_type == RD_KAFKA_OP_RECV_BUF; + + rktp = rko->rko_rktp; + + if (rktp && !silent_op) + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPOP", + "Group \"%.*s\" received op %s in state %s " + "(join-state %s) for %.*s [%" PRId32 "]", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_op2str(rko->rko_type), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + else if (!silent_op) + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPOP", + "Group \"%.*s\" received op %s in state %s " + "(join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_op2str(rko->rko_type), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + switch ((int)rko->rko_type) { + case RD_KAFKA_OP_NAME: + /* Return the currently assigned member id. */ + if (rkcg->rkcg_member_id) + rko->rko_u.name.str = + RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id); + rd_kafka_op_reply(rko, 0); + rko = NULL; + break; + + case RD_KAFKA_OP_CG_METADATA: + /* Return the current consumer group metadata. */ + rko->rko_u.cg_metadata = + rkcg->rkcg_member_id + ? 
rd_kafka_consumer_group_metadata_new_with_genid( + rkcg->rkcg_rk->rk_conf.group_id_str, + rkcg->rkcg_generation_id, + rkcg->rkcg_member_id->str, + rkcg->rkcg_rk->rk_conf.group_instance_id) + : NULL; + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); + rko = NULL; + break; + + case RD_KAFKA_OP_OFFSET_FETCH: + if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) { + rd_kafka_op_handle_OffsetFetch( + rkcg->rkcg_rk, NULL, RD_KAFKA_RESP_ERR__WAIT_COORD, + NULL, NULL, rko); + rko = NULL; /* rko freed by handler */ + break; + } + + rd_kafka_OffsetFetchRequest( + rkcg->rkcg_coord, rk->rk_group_id->str, + rko->rko_u.offset_fetch.partitions, rd_false, -1, NULL, + rko->rko_u.offset_fetch.require_stable_offsets, + 0, /* Timeout */ + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_op_handle_OffsetFetch, rko); + rko = NULL; /* rko now owned by request */ + break; + + case RD_KAFKA_OP_PARTITION_JOIN: + rd_kafka_cgrp_partition_add(rkcg, rktp); + + /* If terminating tell the partition to leave */ + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) + rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_NO_REPLYQ); + break; + + case RD_KAFKA_OP_PARTITION_LEAVE: + rd_kafka_cgrp_partition_del(rkcg, rktp); + break; + + case RD_KAFKA_OP_OFFSET_COMMIT: + /* Trigger offsets commit. */ + rd_kafka_cgrp_offsets_commit(rkcg, rko, + /* only set offsets + * if no partitions were + * specified. */ + rko->rko_u.offset_commit.partitions + ? 0 + : 1 /* set_offsets*/, + rko->rko_u.offset_commit.reason); + rko = NULL; /* rko now owned by request */ + break; + + case RD_KAFKA_OP_COORD_QUERY: + rd_kafka_cgrp_coord_query( + rkcg, + rko->rko_err ? rd_kafka_err2str(rko->rko_err) : "from op"); + break; + + case RD_KAFKA_OP_SUBSCRIBE: + rd_kafka_app_polled(rk); + + /* New atomic subscription (may be NULL) */ + if (rkcg->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + err = rd_kafka_cgrp_consumer_subscribe( + rkcg, rko->rko_u.subscribe.topics); + } else { + err = rd_kafka_cgrp_subscribe( + rkcg, rko->rko_u.subscribe.topics); + } + + if (!err) /* now owned by rkcg */ + rko->rko_u.subscribe.topics = NULL; + + rd_kafka_op_reply(rko, err); + rko = NULL; + break; + + case RD_KAFKA_OP_ASSIGN: + rd_kafka_cgrp_handle_assign_op(rkcg, rko); + rko = NULL; + break; + + case RD_KAFKA_OP_GET_SUBSCRIPTION: + if (rkcg->rkcg_next_subscription) + rko->rko_u.subscribe.topics = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_next_subscription); + else if (rkcg->rkcg_next_unsubscribe) + rko->rko_u.subscribe.topics = NULL; + else if (rkcg->rkcg_subscription) + rko->rko_u.subscribe.topics = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_subscription); + rd_kafka_op_reply(rko, 0); + rko = NULL; + break; + + case RD_KAFKA_OP_GET_ASSIGNMENT: + /* This is the consumer assignment, not the group assignment. 
*/ + rko->rko_u.assign.partitions = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_rk->rk_consumer.assignment.all); + + rd_kafka_op_reply(rko, 0); + rko = NULL; + break; + + case RD_KAFKA_OP_GET_REBALANCE_PROTOCOL: + rko->rko_u.rebalance_protocol.str = + rd_kafka_rebalance_protocol2str( + rd_kafka_cgrp_rebalance_protocol(rkcg)); + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); + rko = NULL; + break; + + case RD_KAFKA_OP_TERMINATE: + rd_kafka_cgrp_terminate0(rkcg, rko); + rko = NULL; /* terminate0() takes ownership */ + break; + + default: + rd_kafka_assert(rkcg->rkcg_rk, !*"unknown type"); + break; + } + + if (rko) + rd_kafka_op_destroy(rko); + return RD_KAFKA_OP_RES_HANDLED; +} void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id) { if (rkcg->rkcg_member_id && member_id && @@ -5417,6 +6637,9 @@ void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread)); + if (rkcg->rkcg_group_protocol != RD_KAFKA_GROUP_PROTOCOL_CLASSIC) + return; + if (!rkcg->rkcg_subscription || rkcg->rkcg_subscription->cnt == 0) return; @@ -5487,7 +6710,8 @@ void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, owned_but_not_exist, rkcg->rkcg_group_leader.members != NULL /* Rejoin group following revoke's - * unassign if we are leader */ + * unassign if we are leader and consumer + * group protocol is GENERIC */ , "topics not available"); rd_kafka_topic_partition_list_destroy( @@ -5567,6 +6791,11 @@ rd_kafka_consumer_group_metadata(rd_kafka_t *rk) { return cgmetadata; } +const char *rd_kafka_consumer_group_metadata_member_id( + const rd_kafka_consumer_group_metadata_t *group_metadata) { + return group_metadata->member_id; +} + void rd_kafka_consumer_group_metadata_destroy( rd_kafka_consumer_group_metadata_t *cgmetadata) { rd_free(cgmetadata->group_id); diff --git a/lib/librdkafka-2.3.0/src/rdkafka_cgrp.h b/lib/librdkafka-2.4.0/src/rdkafka_cgrp.h similarity index 85% rename from lib/librdkafka-2.3.0/src/rdkafka_cgrp.h rename to lib/librdkafka-2.4.0/src/rdkafka_cgrp.h index ff62e8d2852..afb671f02a1 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_cgrp.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_cgrp.h @@ -2,6 +2,7 @@ * librdkafka - Apache Kafka C library * * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -56,6 +57,7 @@ typedef struct rd_kafka_cgrp_s { rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */ rd_kafkap_str_t *rkcg_group_instance_id; const rd_kafkap_str_t *rkcg_client_id; + rd_kafkap_str_t *rkcg_client_rack; enum { /* Init state */ @@ -163,6 +165,10 @@ typedef struct rd_kafka_cgrp_s { rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/ rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */ + rd_kafka_timer_t rkcg_serve_timer; /* Timer for next serve. */ + int rkcg_heartbeat_intvl_ms; /* KIP 848: received + * heartbeat interval in + * milliseconds */ rd_interval_t rkcg_join_intvl; /* JoinGroup interval */ rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */ @@ -179,7 +185,8 @@ typedef struct rd_kafka_cgrp_s { rd_list_t rkcg_toppars; /* Toppars subscribed to*/ - int32_t rkcg_generation_id; /* Current generation id */ + int32_t rkcg_generation_id; /* Current generation id (classic) + * or member epoch (consumer). */ rd_kafka_assignor_t *rkcg_assignor; /**< The current partition * assignor. 
used by both
                                             *   leader and members. */
@@ -190,6 +197,12 @@ typedef struct rd_kafka_cgrp_s {
        int32_t rkcg_coord_id; /**< Current coordinator id,
                                *   or -1 if not known. */
 
+       rd_kafka_group_protocol_t
+           rkcg_group_protocol; /**< Group protocol to use */
+
+       rd_kafkap_str_t *rkcg_group_remote_assignor; /**< Group remote
+                                                     *   assignor to use */
+
        rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator
                                             *   broker handle, or NULL.
                                             *   rkcg_coord's nodename is
@@ -255,6 +268,46 @@ typedef struct rd_kafka_cgrp_s {
         *   currently in-progress incremental unassign. */
        rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment;
 
+       /** Current acked assignment, start with an empty list. */
+       rd_kafka_topic_partition_list_t *rkcg_current_assignment;
+
+       /** Assignment that is currently being reconciled.
+        *  Can be NULL in case there's no reconciliation ongoing. */
+       rd_kafka_topic_partition_list_t *rkcg_target_assignment;
+
+       /** Next assignment that will be reconciled once current
+        *  reconciliation finishes. Can be NULL. */
+       rd_kafka_topic_partition_list_t *rkcg_next_target_assignment;
+
+       /** Number of backoff retries when expediting next heartbeat. */
+       int rkcg_expedite_heartbeat_retries;
+
+       /** Flags for KIP-848 state machine. */
+       int rkcg_consumer_flags;
+/** Coordinator is waiting for an acknowledgement of currently reconciled
+ *  target assignment. Cleared when an HB succeeds
+ *  after reconciliation finishes. */
+#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK 0x1
+/** Member is sending an acknowledgement for a reconciled assignment */
+#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK 0x2
+/** A new subscription needs to be sent to the Coordinator. */
+#define RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION 0x4
+/** A new subscription is being sent to the Coordinator. */
+#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION 0x8
+/** Consumer has subscribed at least once. Until then the rebalance
+ *  protocol is still considered NONE; afterwards it depends on the
+ *  configured partition assignors. */
+#define RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE 0x10
+/** Send a complete request in next heartbeat */
+#define RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST 0x20
+/** Member is fenced, need to rejoin */
+#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN 0x40
+/** Member is fenced, rejoining */
+#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE 0x80
+/** Serve pending assignments after heartbeat */
+#define RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING 0x100
+
        /** Rejoin the group following a currently in-progress
         *  incremental unassign.
*/ rd_bool_t rkcg_rebalance_rejoin; @@ -313,6 +366,7 @@ extern const char *rd_kafka_cgrp_join_state_names[]; void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg); rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, + rd_kafka_group_protocol_t group_protocol, const rd_kafkap_str_t *group_id, const rd_kafkap_str_t *client_id); void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg); @@ -380,4 +434,7 @@ rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) { } } +void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg, + const char *reason); + #endif /* _RDKAFKA_CGRP_H_ */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_conf.c b/lib/librdkafka-2.4.0/src/rdkafka_conf.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_conf.c rename to lib/librdkafka-2.4.0/src/rdkafka_conf.c index 154582d6fcc..8244b4a0416 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_conf.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_conf.c @@ -1132,9 +1132,26 @@ static const struct rd_kafka_property rd_kafka_properties[] = { "Group session keepalive heartbeat interval.", 1, 3600 * 1000, 3 * 1000}, {_RK_GLOBAL | _RK_CGRP, "group.protocol.type", _RK_C_KSTR, _RK(group_protocol_type), - "Group protocol type. NOTE: Currently, the only supported group " + "Group protocol type for the `classic` group protocol. NOTE: Currently, " + "the only supported group " "protocol type is `consumer`.", .sdef = "consumer"}, + {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.protocol", _RK_C_S2I, + _RK(group_protocol), + "Group protocol to use. Use `classic` for the original protocol and " + "`consumer` for the new " + "protocol introduced in KIP-848. Available protocols: classic or " + "consumer. Default is `classic`, " + "but will change to `consumer` in next releases.", + .vdef = RD_KAFKA_GROUP_PROTOCOL_CLASSIC, + .s2i = {{RD_KAFKA_GROUP_PROTOCOL_CLASSIC, "classic"}, + {RD_KAFKA_GROUP_PROTOCOL_CONSUMER, "consumer"}}}, + {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.remote.assignor", _RK_C_STR, + _RK(group_remote_assignor), + "Server side assignor to use. Keep it null to make server select a " + "suitable assignor for the group. " + "Available assignors: uniform or range. Default is null", + .sdef = NULL}, {_RK_GLOBAL | _RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT, _RK(coord_query_intvl_ms), "How often to query for the current client group coordinator. " diff --git a/lib/librdkafka-2.3.0/src/rdkafka_conf.h b/lib/librdkafka-2.4.0/src/rdkafka_conf.h similarity index 98% rename from lib/librdkafka-2.3.0/src/rdkafka_conf.h rename to lib/librdkafka-2.4.0/src/rdkafka_conf.h index bd17a261bf8..ccc95947a28 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_conf.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_conf.h @@ -163,6 +163,11 @@ typedef enum { RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY, } rd_kafka_client_dns_lookup_t; +typedef enum { + RD_KAFKA_GROUP_PROTOCOL_CLASSIC, + RD_KAFKA_GROUP_PROTOCOL_CONSUMER, +} rd_kafka_group_protocol_t; + /* Increase in steps of 64 as needed. 
* This must be larger than sizeof(rd_kafka_[topic_]conf_t) */ #define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 33) @@ -363,8 +368,10 @@ struct rd_kafka_conf_s { int fetch_min_bytes; int fetch_queue_backoff_ms; int fetch_error_backoff_ms; + rd_kafka_group_protocol_t group_protocol; char *group_id_str; char *group_instance_id; + char *group_remote_assignor; int allow_auto_create_topics; rd_kafka_pattern_list_t *topic_blacklist; @@ -379,6 +386,7 @@ struct rd_kafka_conf_s { rd_kafkap_str_t *group_protocol_type; char *partition_assignment_strategy; rd_list_t partition_assignors; + rd_bool_t partition_assignors_cooperative; int enabled_assignor_cnt; void (*rebalance_cb)(rd_kafka_t *rk, diff --git a/lib/librdkafka-2.3.0/src/rdkafka_confval.h b/lib/librdkafka-2.4.0/src/rdkafka_confval.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_confval.h rename to lib/librdkafka-2.4.0/src/rdkafka_confval.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_coord.c b/lib/librdkafka-2.4.0/src/rdkafka_coord.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_coord.c rename to lib/librdkafka-2.4.0/src/rdkafka_coord.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_coord.h b/lib/librdkafka-2.4.0/src/rdkafka_coord.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_coord.h rename to lib/librdkafka-2.4.0/src/rdkafka_coord.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_error.c b/lib/librdkafka-2.4.0/src/rdkafka_error.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_error.c rename to lib/librdkafka-2.4.0/src/rdkafka_error.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_error.h b/lib/librdkafka-2.4.0/src/rdkafka_error.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_error.h rename to lib/librdkafka-2.4.0/src/rdkafka_error.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_event.c b/lib/librdkafka-2.4.0/src/rdkafka_event.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_event.c rename to lib/librdkafka-2.4.0/src/rdkafka_event.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_event.h b/lib/librdkafka-2.4.0/src/rdkafka_event.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_event.h rename to lib/librdkafka-2.4.0/src/rdkafka_event.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_feature.c b/lib/librdkafka-2.4.0/src/rdkafka_feature.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_feature.c rename to lib/librdkafka-2.4.0/src/rdkafka_feature.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_feature.h b/lib/librdkafka-2.4.0/src/rdkafka_feature.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_feature.h rename to lib/librdkafka-2.4.0/src/rdkafka_feature.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_fetcher.c b/lib/librdkafka-2.4.0/src/rdkafka_fetcher.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_fetcher.c rename to lib/librdkafka-2.4.0/src/rdkafka_fetcher.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_fetcher.h b/lib/librdkafka-2.4.0/src/rdkafka_fetcher.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_fetcher.h rename to lib/librdkafka-2.4.0/src/rdkafka_fetcher.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_header.c b/lib/librdkafka-2.4.0/src/rdkafka_header.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_header.c rename to lib/librdkafka-2.4.0/src/rdkafka_header.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_header.h b/lib/librdkafka-2.4.0/src/rdkafka_header.h similarity index 100% rename from 
lib/librdkafka-2.3.0/src/rdkafka_header.h
rename to lib/librdkafka-2.4.0/src/rdkafka_header.h
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_idempotence.c b/lib/librdkafka-2.4.0/src/rdkafka_idempotence.c
similarity index 100%
rename from lib/librdkafka-2.3.0/src/rdkafka_idempotence.c
rename to lib/librdkafka-2.4.0/src/rdkafka_idempotence.c
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_idempotence.h b/lib/librdkafka-2.4.0/src/rdkafka_idempotence.h
similarity index 100%
rename from lib/librdkafka-2.3.0/src/rdkafka_idempotence.h
rename to lib/librdkafka-2.4.0/src/rdkafka_idempotence.h
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_int.h b/lib/librdkafka-2.4.0/src/rdkafka_int.h
similarity index 96%
rename from lib/librdkafka-2.3.0/src/rdkafka_int.h
rename to lib/librdkafka-2.4.0/src/rdkafka_int.h
index e586dd6e692..46acf948565 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_int.h
+++ b/lib/librdkafka-2.4.0/src/rdkafka_int.h
@@ -879,9 +879,14 @@ void rd_kafka_log0(const rd_kafka_conf_t *conf,
         rd_kafka_log0(&rk->rk_conf, rk, NULL, level, RD_KAFKA_DBG_NONE, fac,  \
                       __VA_ARGS__)

+#define rd_kafka_conf_is_dbg(conf, ctx)                                        \
+        unlikely((conf).debug &(RD_KAFKA_DBG_##ctx))
+
+#define rd_kafka_is_dbg(rk, ctx) (rd_kafka_conf_is_dbg(rk->rk_conf, ctx))
+
 #define rd_kafka_dbg(rk, ctx, fac, ...)                                        \
         do {                                                                   \
-                if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_##ctx)))      \
+                if (rd_kafka_is_dbg(rk, ctx))                                  \
                         rd_kafka_log0(&rk->rk_conf, rk, NULL, LOG_DEBUG,       \
                                       (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \
         } while (0)
@@ -889,7 +894,7 @@ void rd_kafka_log0(const rd_kafka_conf_t *conf,
 /* dbg() not requiring an rk, just the conf object, for early logging */
 #define rd_kafka_dbg0(conf, ctx, fac, ...)                                     \
         do {                                                                   \
-                if (unlikely((conf)->debug & (RD_KAFKA_DBG_##ctx)))            \
+                if (rd_kafka_conf_is_dbg(*conf, ctx))                          \
                         rd_kafka_log0(conf, NULL, NULL, LOG_DEBUG,             \
                                       (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \
         } while (0)
@@ -909,10 +914,11 @@ void rd_kafka_log0(const rd_kafka_conf_t *conf,
 #define rd_rkb_log(rkb, level, fac, ...)                                       \
         rd_rkb_log0(rkb, level, RD_KAFKA_DBG_NONE, fac, __VA_ARGS__)

+#define rd_rkb_is_dbg(rkb, ctx) rd_kafka_is_dbg((rkb)->rkb_rk, ctx)
+
 #define rd_rkb_dbg(rkb, ctx, fac, ...)                                         \
         do {                                                                   \
-                if (unlikely((rkb)->rkb_rk->rk_conf.debug &                    \
-                             (RD_KAFKA_DBG_##ctx))) {                          \
+                if (rd_rkb_is_dbg(rkb, ctx)) {                                 \
                         rd_rkb_log0(rkb, LOG_DEBUG, (RD_KAFKA_DBG_##ctx), fac, \
                                     __VA_ARGS__);                              \
                 }                                                              \
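The new predicates factor the "is this debug context enabled?" test out of the logging macros so callers can guard expensive diagnostics; a sketch (render_cgrp_state() is hypothetical):

        if (rd_kafka_is_dbg(rk, CGRP)) {
                /* Build the costly string only when `cgrp` debugging is on. */
                char *state = render_cgrp_state(rk); /* hypothetical */
                rd_kafka_dbg(rk, CGRP, "CGRPSTATE", "%s", state);
                rd_free(state);
        }

@@ -953,10 +959,15 @@ static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
 rd_kafka_fatal_error_code(rd_kafka_t *rk) {
         /* This is an optimization to avoid an atomic read which are costly
          * on some platforms:
-         * Fatal errors are currently only raised by the idempotent producer
-         * and static consumers (group.instance.id). */
+         * Fatal errors are currently raised by:
+         * 1) the idempotent producer
+         * 2) static consumers (group.instance.id)
+         * 3) consumer groups using the consumer protocol (introduced in
+         *    KIP-848).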
See exact + * errors in rd_kafka_cgrp_handle_ConsumerGroupHeartbeat() */ if ((rk->rk_type == RD_KAFKA_PRODUCER && rk->rk_conf.eos.idempotence) || - (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_conf.group_instance_id)) + (rk->rk_type == RD_KAFKA_CONSUMER && + (rk->rk_conf.group_instance_id || + rk->rk_conf.group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER))) return rd_atomic32_get(&rk->rk_fatal.err); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -1040,8 +1051,18 @@ static RD_INLINE RD_UNUSED void rd_kafka_app_poll_blocking(rd_kafka_t *rk) { * @locks none */ static RD_INLINE RD_UNUSED void rd_kafka_app_polled(rd_kafka_t *rk) { - if (rk->rk_type == RD_KAFKA_CONSUMER) + if (rk->rk_type == RD_KAFKA_CONSUMER) { rd_atomic64_set(&rk->rk_ts_last_poll, rd_clock()); + if (unlikely(rk->rk_cgrp && + rk->rk_cgrp->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER && + rk->rk_cgrp->rkcg_flags & + RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED)) { + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rk->rk_cgrp, + "app polled after poll interval exceeded"); + } + } } diff --git a/lib/librdkafka-2.3.0/src/rdkafka_interceptor.c b/lib/librdkafka-2.4.0/src/rdkafka_interceptor.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_interceptor.c rename to lib/librdkafka-2.4.0/src/rdkafka_interceptor.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_interceptor.h b/lib/librdkafka-2.4.0/src/rdkafka_interceptor.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_interceptor.h rename to lib/librdkafka-2.4.0/src/rdkafka_interceptor.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_lz4.c b/lib/librdkafka-2.4.0/src/rdkafka_lz4.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_lz4.c rename to lib/librdkafka-2.4.0/src/rdkafka_lz4.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_lz4.h b/lib/librdkafka-2.4.0/src/rdkafka_lz4.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_lz4.h rename to lib/librdkafka-2.4.0/src/rdkafka_lz4.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_metadata.c b/lib/librdkafka-2.4.0/src/rdkafka_metadata.c similarity index 92% rename from lib/librdkafka-2.3.0/src/rdkafka_metadata.c rename to lib/librdkafka-2.4.0/src/rdkafka_metadata.c index de90b166e68..7e9c90376d2 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_metadata.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_metadata.c @@ -126,15 +126,16 @@ rd_kafka_metadata(rd_kafka_t *rk, rd_kafka_op_set_replyq(rko, rkq, 0); rko->rko_u.metadata.force = 1; /* Force metadata request regardless * of outstanding metadata requests. */ - rd_kafka_MetadataRequest( - rkb, &topics, "application requested", allow_auto_create_topics, - /* cgrp_update: - * Only update consumer group state - * on response if this lists all - * topics in the cluster, since a - * partial request may make it seem - * like some subscribed topics are missing. */ - all_topics ? rd_true : rd_false, rd_false /* force_racks */, rko); + rd_kafka_MetadataRequest(rkb, &topics, NULL, "application requested", + allow_auto_create_topics, + /* cgrp_update: + * Only update consumer group state + * on response if this lists all + * topics in the cluster, since a + * partial request may make it seem + * like some subscribed topics are missing. */ + all_topics ? 
rd_true : rd_false, + rd_false /* force_racks */, rko); rd_list_destroy(&topics); rd_kafka_broker_destroy(rkb); @@ -473,11 +474,14 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, rd_kafka_metadata_internal_t *mdi = NULL; rd_kafka_metadata_t *md = NULL; size_t rkb_namelen; - const int log_decode_errors = LOG_ERR; - rd_list_t *missing_topics = NULL; - const rd_list_t *requested_topics = request_topics; - rd_bool_t all_topics = rd_false; - rd_bool_t cgrp_update = rd_false; + const int log_decode_errors = LOG_ERR; + rd_list_t *missing_topics = NULL; + rd_list_t *missing_topic_ids = NULL; + + const rd_list_t *requested_topics = request_topics; + const rd_list_t *requested_topic_ids = NULL; + rd_bool_t all_topics = rd_false; + rd_bool_t cgrp_update = rd_false; rd_bool_t has_reliable_leader_epochs = rd_kafka_has_reliable_leader_epochs(rkb); int ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion; @@ -486,7 +490,7 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; int broker_changes = 0; int cache_changes = 0; - rd_ts_t ts_start = rd_clock(); + /* If client rack is present, the metadata cache (topic or full) needs * to contain the partition to rack map. */ rd_bool_t has_client_rack = rk->rk_conf.client_rack && @@ -494,8 +498,9 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, rd_bool_t compute_racks = has_client_rack; if (request) { - requested_topics = request->rkbuf_u.Metadata.topics; - all_topics = request->rkbuf_u.Metadata.all_topics; + requested_topics = request->rkbuf_u.Metadata.topics; + requested_topic_ids = request->rkbuf_u.Metadata.topic_ids; + all_topics = request->rkbuf_u.Metadata.all_topics; cgrp_update = request->rkbuf_u.Metadata.cgrp_update && rk->rk_cgrp; compute_racks |= request->rkbuf_u.Metadata.force_racks; @@ -517,6 +522,9 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, if (requested_topics) missing_topics = rd_list_copy(requested_topics, rd_list_string_copy, NULL); + if (requested_topic_ids) + missing_topic_ids = + rd_list_copy(requested_topic_ids, rd_list_Uuid_copy, NULL); rd_kafka_broker_lock(rkb); rkb_namelen = strlen(rkb->rkb_name) + 1; @@ -633,6 +641,8 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, if (ApiVersion >= 10) { rd_kafka_buf_read_uuid(rkbuf, &mdi->topics[i].topic_id); + } else { + mdi->topics[i].topic_id = RD_KAFKA_UUID_ZERO; } if (ApiVersion >= 1) @@ -829,37 +839,43 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, rd_kafka_parse_Metadata_update_topic(rkb, &md->topics[i], &mdi->topics[i]); - if (requested_topics) { + if (requested_topics) rd_list_free_cb(missing_topics, rd_list_remove_cmp(missing_topics, md->topics[i].topic, (void *)strcmp)); - if (!all_topics) { - /* Only update cache when not asking - * for all topics. */ - - rd_kafka_wrlock(rk); - rd_kafka_metadata_cache_topic_update( - rk, &md->topics[i], &mdi->topics[i], - rd_false /*propagate later*/, - /* use has_client_rack rather than - compute_racks. We need cached rack ids - only in case we need to rejoin the group - if they change and client.rack is set - (KIP-881). */ - has_client_rack, mdi->brokers, - md->broker_cnt); - cache_changes++; - rd_kafka_wrunlock(rk); - } - } + if (requested_topic_ids) + rd_list_free_cb( + missing_topic_ids, + rd_list_remove_cmp(missing_topic_ids, + &mdi->topics[i].topic_id, + (void *)rd_kafka_Uuid_ptr_cmp)); + /* Only update cache when not asking + * for all topics or cache entry + * already exists. 
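The same copy-then-remove bookkeeping now exists for both topic names and topic ids; schematically (mirroring the calls above):

        /* Copy the requested ids, then drop each id seen in the response. */
        rd_list_t *missing_ids =
            rd_list_copy(requested_topic_ids, rd_list_Uuid_copy, NULL);
        /* ...for every rd_kafka_Uuid_t topic_id parsed from the response: */
        rd_list_free_cb(missing_ids,
                        rd_list_remove_cmp(missing_ids, &topic_id,
                                           (void *)rd_kafka_Uuid_ptr_cmp));
        /* Ids still left in missing_ids were not returned by the broker
         * and their topics are marked as non-existent. */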
*/ + rd_kafka_wrlock(rk); + cache_changes += + rd_kafka_metadata_cache_topic_update( + rk, &md->topics[i], &mdi->topics[i], + rd_false /*propagate later*/, + /* use has_client_rack rather than + compute_racks. We need cached rack ids + only in case we need to rejoin the group + if they change and client.rack is set + (KIP-881). */ + has_client_rack, mdi->brokers, + md->broker_cnt, + all_topics /*cache entry needs to exist + *if all_topics*/); + rd_kafka_wrunlock(rk); } /* Requested topics not seen in metadata? Propogate to topic code. */ if (missing_topics) { char *topic; rd_rkb_dbg(rkb, TOPIC, "METADATA", - "%d/%d requested topic(s) seen in metadata", + "%d/%d requested topic(s) seen in metadata" + " (lookup by name)", rd_list_cnt(requested_topics) - rd_list_cnt(missing_topics), rd_list_cnt(requested_topics)); @@ -886,6 +902,42 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, } } } + if (missing_topic_ids) { + rd_kafka_Uuid_t *topic_id; + rd_rkb_dbg(rkb, TOPIC, "METADATA", + "%d/%d requested topic(s) seen in metadata" + " (lookup by id)", + rd_list_cnt(requested_topic_ids) - + rd_list_cnt(missing_topic_ids), + rd_list_cnt(requested_topic_ids)); + for (i = 0; i < rd_list_cnt(missing_topic_ids); i++) { + rd_kafka_Uuid_t *missing_topic_id = + missing_topic_ids->rl_elems[i]; + rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s", + rd_kafka_Uuid_base64str(missing_topic_id)); + } + RD_LIST_FOREACH(topic_id, missing_topic_ids, i) { + rd_kafka_topic_t *rkt; + + rd_kafka_rdlock(rk); + rkt = rd_kafka_topic_find_by_topic_id(rkb->rkb_rk, + *topic_id); + rd_kafka_rdunlock(rk); + if (rkt) { + /* Received metadata response contained no + * information about topic 'rkt' and thus + * indicates the topic is not available in the + * cluster. + * Mark the topic as non-existent */ + rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_set_notexists( + rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_wrunlock(rkt); + + rd_kafka_topic_destroy0(rkt); + } + } + } rd_kafka_wrlock(rkb->rkb_rk); @@ -928,9 +980,10 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, } if (all_topics) { - /* Expire all cache entries that were not updated. */ - rd_kafka_metadata_cache_evict_by_age(rkb->rkb_rk, ts_start); - + /* All hints have been replaced by the corresponding entry. + * Rest of hints can be removed as topics aren't present + * in full metadata. */ + rd_kafka_metadata_cache_purge_all_hints(rkb->rkb_rk); if (rkb->rkb_rk->rk_full_metadata) rd_kafka_metadata_destroy( &rkb->rkb_rk->rk_full_metadata->metadata); @@ -950,15 +1003,18 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, "Caching full metadata with " "%d broker(s) and %d topic(s): %s", md->broker_cnt, md->topic_cnt, reason); - } else { - if (cache_changes) - rd_kafka_metadata_cache_propagate_changes(rk); - rd_kafka_metadata_cache_expiry_start(rk); } - /* Remove cache hints for the originally requested topics. */ if (requested_topics) rd_kafka_metadata_cache_purge_hints(rk, requested_topics); + if (requested_topic_ids) + rd_kafka_metadata_cache_purge_hints_by_id(rk, + requested_topic_ids); + + if (cache_changes) { + rd_kafka_metadata_cache_propagate_changes(rk); + rd_kafka_metadata_cache_expiry_start(rk); + } rd_kafka_wrunlock(rkb->rkb_rk); @@ -974,7 +1030,8 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, * which may contain only a sub-set of the subscribed topics (namely * the effective subscription of available topics) as to not * propagate non-included topics as non-existent. 
*/ - if (cgrp_update && (requested_topics || all_topics)) + if (cgrp_update && + (requested_topics || requested_topic_ids || all_topics)) rd_kafka_cgrp_metadata_update_check(rkb->rkb_rk->rk_cgrp, rd_true /*do join*/); @@ -989,6 +1046,8 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, done: if (missing_topics) rd_list_destroy(missing_topics); + if (missing_topic_ids) + rd_list_destroy(missing_topic_ids); /* This metadata request was triggered by someone wanting * the metadata information back as a reply, so send that reply now. @@ -1010,9 +1069,19 @@ rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, rd_kafka_metadata_cache_purge_hints(rk, requested_topics); rd_kafka_wrunlock(rkb->rkb_rk); } + if (requested_topic_ids) { + /* Failed requests shall purge cache hints for + * the requested topics. */ + rd_kafka_wrlock(rkb->rkb_rk); + rd_kafka_metadata_cache_purge_hints_by_id(rk, + requested_topic_ids); + rd_kafka_wrunlock(rkb->rkb_rk); + } if (missing_topics) rd_list_destroy(missing_topics); + if (missing_topic_ids) + rd_list_destroy(missing_topic_ids); rd_tmpabuf_destroy(&tbuf); return err; @@ -1344,8 +1413,9 @@ rd_kafka_metadata_refresh_topics(rd_kafka_t *rk, "Requesting metadata for %d/%d topics: %s", rd_list_cnt(&q_topics), rd_list_cnt(topics), reason); - rd_kafka_MetadataRequest(rkb, &q_topics, reason, allow_auto_create, - cgrp_update, rd_false /* force_racks */, NULL); + rd_kafka_MetadataRequest(rkb, &q_topics, NULL, reason, + allow_auto_create, cgrp_update, + rd_false /* force_racks */, NULL); rd_list_destroy(&q_topics); @@ -1521,7 +1591,7 @@ rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk, rd_list_init(&topics, 0, NULL); /* empty list = all topics */ rd_kafka_MetadataRequest( - rkb, &topics, reason, rd_false /*no auto create*/, + rkb, &topics, NULL, reason, rd_false /*no auto create*/, rd_true /*cgrp update*/, rd_false /* force_rack */, NULL); rd_list_destroy(&topics); @@ -1559,8 +1629,9 @@ rd_kafka_metadata_request(rd_kafka_t *rk, destroy_rkb = 1; } - rd_kafka_MetadataRequest(rkb, topics, reason, allow_auto_create_topics, - cgrp_update, rd_false /* force racks */, rko); + rd_kafka_MetadataRequest(rkb, topics, NULL, reason, + allow_auto_create_topics, cgrp_update, + rd_false /* force racks */, rko); if (destroy_rkb) rd_kafka_broker_destroy(rkb); diff --git a/lib/librdkafka-2.3.0/src/rdkafka_metadata.h b/lib/librdkafka-2.4.0/src/rdkafka_metadata.h similarity index 94% rename from lib/librdkafka-2.3.0/src/rdkafka_metadata.h rename to lib/librdkafka-2.4.0/src/rdkafka_metadata.h index 213bf2b8968..b0926845ef9 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_metadata.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_metadata.h @@ -219,7 +219,8 @@ rd_kafka_metadata_new_topic_with_partition_replicas_mock(int replication_factor, */ struct rd_kafka_metadata_cache_entry { - rd_avl_node_t rkmce_avlnode; /* rkmc_avl */ + rd_avl_node_t rkmce_avlnode; /* rkmc_avl */ + rd_avl_node_t rkmce_avlnode_by_id; /* rkmc_avl_by_id */ TAILQ_ENTRY(rd_kafka_metadata_cache_entry) rkmce_link; /* rkmc_expiry */ rd_ts_t rkmce_ts_expires; /* Expire time */ rd_ts_t rkmce_ts_insert; /* Insert time */ @@ -243,6 +244,7 @@ struct rd_kafka_metadata_cache_entry { struct rd_kafka_metadata_cache { rd_avl_t rkmc_avl; + rd_avl_t rkmc_avl_by_id; TAILQ_HEAD(, rd_kafka_metadata_cache_entry) rkmc_expiry; rd_kafka_timer_t rkmc_expiry_tmr; int rkmc_cnt; @@ -269,21 +271,30 @@ struct rd_kafka_metadata_cache { int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk, const char *topic); +int 
rd_kafka_metadata_cache_delete_by_topic_id(rd_kafka_t *rk,
+                                           const rd_kafka_Uuid_t topic_id);
 void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk);
-int rd_kafka_metadata_cache_evict_by_age(rd_kafka_t *rk, rd_ts_t ts);
-void rd_kafka_metadata_cache_topic_update(
+int rd_kafka_metadata_cache_purge_all_hints(rd_kafka_t *rk);
+int rd_kafka_metadata_cache_topic_update(
     rd_kafka_t *rk,
     const rd_kafka_metadata_topic_t *mdt,
     const rd_kafka_metadata_topic_internal_t *mdit,
     rd_bool_t propagate,
     rd_bool_t include_metadata,
     rd_kafka_metadata_broker_internal_t *brokers,
-    size_t broker_cnt);
+    size_t broker_cnt,
+    rd_bool_t only_existing);
 void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk);
 struct rd_kafka_metadata_cache_entry *
 rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid);
+struct rd_kafka_metadata_cache_entry *
+rd_kafka_metadata_cache_find_by_id(rd_kafka_t *rk,
+                                   const rd_kafka_Uuid_t topic_id,
+                                   int valid);
 void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk,
                                          const rd_list_t *topics);
+void rd_kafka_metadata_cache_purge_hints_by_id(rd_kafka_t *rk,
+                                               const rd_list_t *topic_ids);
 int rd_kafka_metadata_cache_hint(rd_kafka_t *rk,
                                  const rd_list_t *topics,
                                  rd_list_t *dst,
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_metadata_cache.c b/lib/librdkafka-2.4.0/src/rdkafka_metadata_cache.c
similarity index 81%
rename from lib/librdkafka-2.3.0/src/rdkafka_metadata_cache.c
rename to lib/librdkafka-2.4.0/src/rdkafka_metadata_cache.c
index b3bad4de8d3..d4c93cd11c8 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_metadata_cache.c
+++ b/lib/librdkafka-2.4.0/src/rdkafka_metadata_cache.c
@@ -80,8 +80,14 @@ static RD_INLINE void
 rd_kafka_metadata_cache_delete(rd_kafka_t *rk,
                                struct rd_kafka_metadata_cache_entry *rkmce,
                                int unlink_avl) {
-        if (unlink_avl)
+        if (unlink_avl) {
                 RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, rkmce);
+                if (!RD_KAFKA_UUID_IS_ZERO(
+                        rkmce->rkmce_metadata_internal_topic.topic_id)) {
+                        RD_AVL_REMOVE_ELM(
+                            &rk->rk_metadata_cache.rkmc_avl_by_id, rkmce);
+                }
+        }

         TAILQ_REMOVE(&rk->rk_metadata_cache.rkmc_expiry, rkmce, rkmce_link);
         rd_kafka_assert(NULL, rk->rk_metadata_cache.rkmc_cnt > 0);
         rk->rk_metadata_cache.rkmc_cnt--;
@@ -103,6 +109,21 @@ int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk, const char *topic) {
         return rkmce ? 1 : 0;
 }

+/**
+ * @brief Delete cache entry by topic id
+ * @locks rd_kafka_wrlock()
+ * @returns 1 if entry was found and removed, else 0.
+ */
+int rd_kafka_metadata_cache_delete_by_topic_id(rd_kafka_t *rk,
+                                               const rd_kafka_Uuid_t topic_id) {
+        struct rd_kafka_metadata_cache_entry *rkmce;
+
+        rkmce = rd_kafka_metadata_cache_find_by_id(rk, topic_id, 1);
+        if (rkmce)
+                rd_kafka_metadata_cache_delete(rk, rkmce, 1);
+        return rkmce ? 1 : 0;
+}
+
 static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk);

 /**
@@ -161,45 +182,27 @@ static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk) {

 /**
- * @brief Evict timed out entries from cache based on their insert/update time
- *        rather than expiry time. Any entries older than \p ts will be evicted.
+ * @brief Remove all cache hints.
+ *        This is done after the Metadata response has been parsed and
+ *        existing topic information has replaced the corresponding hints,
+ *        so this will only remove unmatched topics from the cache.
 *
- * @returns the number of entries evicted.
+ * @returns the number of purged hints * * @locks_required rd_kafka_wrlock() */ -int rd_kafka_metadata_cache_evict_by_age(rd_kafka_t *rk, rd_ts_t ts) { +int rd_kafka_metadata_cache_purge_all_hints(rd_kafka_t *rk) { int cnt = 0; struct rd_kafka_metadata_cache_entry *rkmce, *tmp; TAILQ_FOREACH_SAFE(rkmce, &rk->rk_metadata_cache.rkmc_expiry, rkmce_link, tmp) { - if (rkmce->rkmce_ts_insert <= ts) { + if (!RD_KAFKA_METADATA_CACHE_VALID(rkmce)) { rd_kafka_metadata_cache_delete(rk, rkmce, 1); cnt++; } } - /* Update expiry timer */ - rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry); - if (rkmce) - rd_kafka_timer_start(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_expiry_tmr, - rkmce->rkmce_ts_expires - rd_clock(), - rd_kafka_metadata_cache_evict_tmr_cb, rk); - else - rd_kafka_timer_stop(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_expiry_tmr, 1); - - rd_kafka_dbg(rk, METADATA, "METADATA", - "Expired %d entries older than %dms from metadata cache " - "(%d entries remain)", - cnt, (int)((rd_clock() - ts) / 1000), - rk->rk_metadata_cache.rkmc_cnt); - - if (cnt) - rd_kafka_metadata_cache_propagate_changes(rk); - return cnt; } @@ -221,6 +224,25 @@ rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid) { return NULL; } +/** + * @brief Find cache entry by topic id + * + * @param valid: entry must be valid (not hint) + * + * @locks rd_kafka_*lock() + */ +struct rd_kafka_metadata_cache_entry * +rd_kafka_metadata_cache_find_by_id(rd_kafka_t *rk, + const rd_kafka_Uuid_t topic_id, + int valid) { + struct rd_kafka_metadata_cache_entry skel, *rkmce; + skel.rkmce_metadata_internal_topic.topic_id = topic_id; + rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl_by_id, &skel); + if (rkmce && (!valid || RD_KAFKA_METADATA_CACHE_VALID(rkmce))) + return rkmce; + return NULL; +} + /** * @brief Partition (id) comparator @@ -247,7 +269,7 @@ static struct rd_kafka_metadata_cache_entry *rd_kafka_metadata_cache_insert( rd_bool_t include_racks, rd_kafka_metadata_broker_internal_t *brokers_internal, size_t broker_cnt) { - struct rd_kafka_metadata_cache_entry *rkmce, *old; + struct rd_kafka_metadata_cache_entry *rkmce, *old, *old_by_id = NULL; rd_tmpabuf_t tbuf; int i; @@ -350,8 +372,28 @@ static struct rd_kafka_metadata_cache_entry *rd_kafka_metadata_cache_insert( /* Insert (and replace existing) entry. */ old = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl, rkmce, rkmce_avlnode); - if (old) + /* Insert (and replace existing) entry into the AVL tree sorted + * by topic id. */ + if (!RD_KAFKA_UUID_IS_ZERO( + rkmce->rkmce_metadata_internal_topic.topic_id)) { + /* If topic id isn't zero insert cache entry into this tree */ + old_by_id = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl_by_id, + rkmce, rkmce_avlnode_by_id); + } else if (old && !RD_KAFKA_UUID_IS_ZERO( + old->rkmce_metadata_internal_topic.topic_id)) { + /* If it had a topic id, remove it from the tree */ + RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl_by_id, old); + } + if (old) { + /* Delete and free old cache entry */ rd_kafka_metadata_cache_delete(rk, old, 0); + } + if (old_by_id && old_by_id != old) { + /* If there was a different cache entry in this tree, + * remove and free it. */ + RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, old_by_id); + rd_kafka_metadata_cache_delete(rk, old_by_id, 0); + } /* Explicitly not freeing the tmpabuf since rkmce points to its * memory. 
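With the second AVL tree a cache entry can now be located either by name or by topic id; a lookup sketch (the caller holds an rk lock, per the @locks notes above):

        struct rd_kafka_metadata_cache_entry *rkmce =
            rd_kafka_metadata_cache_find_by_id(rk, topic_id, 1 /*valid only*/);
        if (!rkmce)
                rkmce = rd_kafka_metadata_cache_find(rk, topic, 1 /*valid*/);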
*/ @@ -414,40 +456,66 @@ void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk) { * For permanent errors (authorization failures), we keep * the entry cached for metadata.max.age.ms. * + * @param only_existing Update only existing metadata cache entries, + * either valid or hinted. + * + * @return 1 on metadata change, 0 when no change was applied + * * @remark The cache expiry timer will not be updated/started, * call rd_kafka_metadata_cache_expiry_start() instead. * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_topic_update( +int rd_kafka_metadata_cache_topic_update( rd_kafka_t *rk, const rd_kafka_metadata_topic_t *mdt, const rd_kafka_metadata_topic_internal_t *mdit, rd_bool_t propagate, rd_bool_t include_racks, rd_kafka_metadata_broker_internal_t *brokers, - size_t broker_cnt) { - rd_ts_t now = rd_clock(); + size_t broker_cnt, + rd_bool_t only_existing) { + struct rd_kafka_metadata_cache_entry *rkmce = NULL; + rd_ts_t now = rd_clock(); rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000); int changed = 1; + if (only_existing) { + if (likely(mdt->topic != NULL)) { + rkmce = rd_kafka_metadata_cache_find(rk, mdt->topic, 0); + } else { + rkmce = rd_kafka_metadata_cache_find_by_id( + rk, mdit->topic_id, 1); + } + if (!rkmce) + return 0; + } - /* Cache unknown topics for a short while (100ms) to allow the cgrp - * logic to find negative cache hits. */ - if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) - ts_expires = RD_MIN(ts_expires, now + (100 * 1000)); - - if (!mdt->err || - mdt->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED || - mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) - rd_kafka_metadata_cache_insert(rk, mdt, mdit, now, ts_expires, - include_racks, brokers, - broker_cnt); - else - changed = - rd_kafka_metadata_cache_delete_by_name(rk, mdt->topic); + if (likely(mdt->topic != NULL)) { + /* Cache unknown topics for a short while (100ms) to allow the + * cgrp logic to find negative cache hits. */ + if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + ts_expires = RD_MIN(ts_expires, now + (100 * 1000)); + + if (!mdt->err || + mdt->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED || + mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + rd_kafka_metadata_cache_insert( + rk, mdt, mdit, now, ts_expires, include_racks, + brokers, broker_cnt); + else + changed = rd_kafka_metadata_cache_delete_by_name( + rk, mdt->topic); + } else { + /* Cache entry found but no topic name: + * delete it. */ + changed = rd_kafka_metadata_cache_delete_by_topic_id( + rk, mdit->topic_id); + } if (changed && propagate) rd_kafka_metadata_cache_propagate_changes(rk); + + return changed; } @@ -485,6 +553,40 @@ void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk, } } +/** + * @brief Remove cache hints for topic ids in \p topic_ids + * This is done when the Metadata response has been parsed and + * replaced hints with existing topic information, thus this will + * only remove unmatched topics from the cache. 
+ * + * @locks rd_kafka_wrlock() + */ +void rd_kafka_metadata_cache_purge_hints_by_id(rd_kafka_t *rk, + const rd_list_t *topic_ids) { + const rd_kafka_Uuid_t *topic_id; + int i; + int cnt = 0; + + RD_LIST_FOREACH(topic_id, topic_ids, i) { + struct rd_kafka_metadata_cache_entry *rkmce; + + if (!(rkmce = rd_kafka_metadata_cache_find_by_id(rk, *topic_id, + 0 /*any*/)) || + RD_KAFKA_METADATA_CACHE_VALID(rkmce)) + continue; + + rd_kafka_metadata_cache_delete(rk, rkmce, 1 /*unlink avl*/); + cnt++; + } + + if (cnt > 0) { + rd_kafka_dbg(rk, METADATA, "METADATA", + "Purged %d/%d cached topic hint(s)", cnt, + rd_list_cnt(topic_ids)); + rd_kafka_metadata_cache_propagate_changes(rk); + } +} + /** * @brief Inserts a non-valid entry for topics in \p topics indicating @@ -589,6 +691,16 @@ static int rd_kafka_metadata_cache_entry_cmp(const void *_a, const void *_b) { return strcmp(a->rkmce_mtopic.topic, b->rkmce_mtopic.topic); } +/** + * @brief Cache entry comparator (on topic id) + */ +static int rd_kafka_metadata_cache_entry_by_id_cmp(const void *_a, + const void *_b) { + const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b; + return rd_kafka_Uuid_cmp(a->rkmce_metadata_internal_topic.topic_id, + b->rkmce_metadata_internal_topic.topic_id); +} + /** * @brief Initialize the metadata cache @@ -598,6 +710,8 @@ static int rd_kafka_metadata_cache_entry_cmp(const void *_a, const void *_b) { void rd_kafka_metadata_cache_init(rd_kafka_t *rk) { rd_avl_init(&rk->rk_metadata_cache.rkmc_avl, rd_kafka_metadata_cache_entry_cmp, 0); + rd_avl_init(&rk->rk_metadata_cache.rkmc_avl_by_id, + rd_kafka_metadata_cache_entry_by_id_cmp, 0); TAILQ_INIT(&rk->rk_metadata_cache.rkmc_expiry); mtx_init(&rk->rk_metadata_cache.rkmc_full_lock, mtx_plain); mtx_init(&rk->rk_metadata_cache.rkmc_cnd_lock, mtx_plain); @@ -620,6 +734,7 @@ void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk) { mtx_destroy(&rk->rk_metadata_cache.rkmc_cnd_lock); cnd_destroy(&rk->rk_metadata_cache.rkmc_cnd); rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl); + rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl_by_id); } diff --git a/lib/librdkafka-2.3.0/src/rdkafka_mock.c b/lib/librdkafka-2.4.0/src/rdkafka_mock.c similarity index 95% rename from lib/librdkafka-2.3.0/src/rdkafka_mock.c rename to lib/librdkafka-2.4.0/src/rdkafka_mock.c index 6c8df688c78..a473f0915dc 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_mock.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_mock.c @@ -469,7 +469,39 @@ rd_kafka_mock_partition_assign_replicas(rd_kafka_mock_partition_t *mpart, mpart, mpart->replicas[rd_jitter(0, replica_cnt - 1)]); } +/** + * @brief Push a partition leader response to passed \p mpart . + */ +static void +rd_kafka_mock_partition_push_leader_response0(rd_kafka_mock_partition_t *mpart, + int32_t leader_id, + int32_t leader_epoch) { + rd_kafka_mock_partition_leader_t *leader_response; + + leader_response = rd_calloc(1, sizeof(*leader_response)); + leader_response->leader_id = leader_id; + leader_response->leader_epoch = leader_epoch; + TAILQ_INSERT_TAIL(&mpart->leader_responses, leader_response, link); +} + +/** + * @brief Return the first mocked partition leader response in \p mpart , + * if available. 
+ */ +rd_kafka_mock_partition_leader_t * +rd_kafka_mock_partition_next_leader_response(rd_kafka_mock_partition_t *mpart) { + return TAILQ_FIRST(&mpart->leader_responses); +} +/** + * @brief Unlink and destroy a partition leader response + */ +void rd_kafka_mock_partition_leader_destroy( + rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_partition_leader_t *mpart_leader) { + TAILQ_REMOVE(&mpart->leader_responses, mpart_leader, link); + rd_free(mpart_leader); +} /** * @brief Unlink and destroy committed offset @@ -546,6 +578,7 @@ rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart, static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) { rd_kafka_mock_msgset_t *mset, *tmp; rd_kafka_mock_committed_offset_t *coff, *tmpcoff; + rd_kafka_mock_partition_leader_t *mpart_leader, *tmp_mpart_leader; TAILQ_FOREACH_SAFE(mset, &mpart->msgsets, link, tmp) rd_kafka_mock_msgset_destroy(mpart, mset); @@ -553,6 +586,10 @@ static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) { TAILQ_FOREACH_SAFE(coff, &mpart->committed_offsets, link, tmpcoff) rd_kafka_mock_committed_offset_destroy(mpart, coff); + TAILQ_FOREACH_SAFE(mpart_leader, &mpart->leader_responses, link, + tmp_mpart_leader) + rd_kafka_mock_partition_leader_destroy(mpart, mpart_leader); + rd_list_destroy(&mpart->pidstates); rd_free(mpart->replicas); @@ -579,6 +616,7 @@ static void rd_kafka_mock_partition_init(rd_kafka_mock_topic_t *mtopic, mpart->update_follower_end_offset = rd_true; TAILQ_INIT(&mpart->committed_offsets); + TAILQ_INIT(&mpart->leader_responses); rd_list_init(&mpart->pidstates, 0, rd_free); @@ -618,7 +656,9 @@ rd_kafka_mock_topic_new(rd_kafka_mock_cluster_t *mcluster, rd_kafka_mock_topic_t *mtopic; int i; - mtopic = rd_calloc(1, sizeof(*mtopic)); + mtopic = rd_calloc(1, sizeof(*mtopic)); + /* Assign random topic id */ + mtopic->id = rd_kafka_Uuid_random(); mtopic->name = rd_strdup(topic); mtopic->cluster = mcluster; @@ -671,6 +711,28 @@ rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster, return NULL; } +/** + * @brief Find a mock topic by id. + * + * @param mcluster Cluster to search in. + * @param id Topic id to find. + * @return Found topic or NULL. + * + * @locks mcluster->lock MUST be held. + */ +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find_by_id(const rd_kafka_mock_cluster_t *mcluster, + rd_kafka_Uuid_t id) { + const rd_kafka_mock_topic_t *mtopic; + + TAILQ_FOREACH(mtopic, &mcluster->topics, link) { + if (!rd_kafka_Uuid_cmp(mtopic->id, id)) + return (rd_kafka_mock_topic_t *)mtopic; + } + + return NULL; +} + /** * @brief Create a topic using default settings. 
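The pushed leader responses form a per-partition FIFO; a drain sketch using the two helpers above (equivalent to the cleanup in the partition destructor):

        rd_kafka_mock_partition_leader_t *lr;
        while ((lr = rd_kafka_mock_partition_next_leader_response(mpart)))
                rd_kafka_mock_partition_leader_destroy(mpart, lr);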
@@ -2072,6 +2134,23 @@ rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
             rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
 }

+rd_kafka_resp_err_t
+rd_kafka_mock_partition_push_leader_response(rd_kafka_mock_cluster_t *mcluster,
+                                             const char *topic,
+                                             int partition,
+                                             int32_t leader_id,
+                                             int32_t leader_epoch) {
+        rd_kafka_op_t *rko   = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+        rko->rko_u.mock.name = rd_strdup(topic);
+        rko->rko_u.mock.cmd  = RD_KAFKA_MOCK_CMD_PART_PUSH_LEADER_RESPONSE;
+        rko->rko_u.mock.partition    = partition;
+        rko->rko_u.mock.leader_id    = leader_id;
+        rko->rko_u.mock.leader_epoch = leader_epoch;
+
+        return rd_kafka_op_err_destroy(
+            rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
 rd_kafka_resp_err_t
 rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
                               int32_t broker_id) {
@@ -2355,6 +2434,23 @@ rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster,
                         mpart->update_follower_end_offset = rd_false;
                 }
                 break;
+        case RD_KAFKA_MOCK_CMD_PART_PUSH_LEADER_RESPONSE:
+                mpart = rd_kafka_mock_partition_get(
+                    mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
+                if (!mpart)
+                        return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+                rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+                             "Push %s [%" PRId32 "] leader response: (%" PRId32
+                             ", %" PRId32 ")",
+                             rko->rko_u.mock.name, rko->rko_u.mock.partition,
+                             rko->rko_u.mock.leader_id,
+                             rko->rko_u.mock.leader_epoch);
+
+                rd_kafka_mock_partition_push_leader_response0(
+                    mpart, rko->rko_u.mock.leader_id,
+                    rko->rko_u.mock.leader_epoch);
+                break;

         /* Broker commands */
         case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
@@ -2649,8 +2745,16 @@ rd_kafka_mock_request_copy(rd_kafka_mock_request_t *mrequest) {
         return request;
 }

-void rd_kafka_mock_request_destroy(rd_kafka_mock_request_t *element) {
-        rd_free(element);
+void rd_kafka_mock_request_destroy(rd_kafka_mock_request_t *mrequest) {
+        rd_free(mrequest);
+}
+
+void rd_kafka_mock_request_destroy_array(rd_kafka_mock_request_t **mrequests,
+                                         size_t mrequest_cnt) {
+        size_t i;
+        for (i = 0; i < mrequest_cnt; i++)
+                rd_kafka_mock_request_destroy(mrequests[i]);
+        rd_free(mrequests);
 }

 static void rd_kafka_mock_request_free(void *element) {
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_mock.h b/lib/librdkafka-2.4.0/src/rdkafka_mock.h
similarity index 93%
rename from lib/librdkafka-2.3.0/src/rdkafka_mock.h
rename to lib/librdkafka-2.4.0/src/rdkafka_mock.h
index 822680c501a..737b768339d 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_mock.h
+++ b/lib/librdkafka-2.4.0/src/rdkafka_mock.h
@@ -2,6 +2,7 @@
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2019-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -281,6 +282,24 @@ rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
                                             int64_t lo,
                                             int64_t hi);

+/**
+ * @brief Push a Metadata leader response onto the partition's queue
+ *        for the given \p topic and \p partition.
+ *
+ * @param topic Topic to change
+ * @param partition Partition to change in \p topic
+ * @param leader_id Broker id of the leader node
+ * @param leader_epoch Leader epoch corresponding to the given \p leader_id
+ *
+ * @return Push operation error code
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_push_leader_response(rd_kafka_mock_cluster_t *mcluster,
+                                             const char *topic,
+                                             int partition,
+                                             int32_t leader_id,
+                                             int32_t leader_epoch);

 /**
 * @brief Disconnects the broker and disallows any new connections.
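A test-side usage sketch for the new exported helper; the cluster handle and topic name are illustrative:

        /* Make the next Metadata response report broker 2 as leader of
         * "mytopic" [0] at leader epoch 5. */
        rd_kafka_resp_err_t err = rd_kafka_mock_partition_push_leader_response(
            mcluster, "mytopic", 0, 2 /*leader_id*/, 5 /*leader_epoch*/);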
@@ -388,6 +407,13 @@ typedef struct rd_kafka_mock_request_s rd_kafka_mock_request_t; */ RD_EXPORT void rd_kafka_mock_request_destroy(rd_kafka_mock_request_t *mreq); +/** + * @brief Destroy a rd_kafka_mock_request_t * array and deallocate it. + */ +RD_EXPORT void +rd_kafka_mock_request_destroy_array(rd_kafka_mock_request_t **mreqs, + size_t mreq_cnt); + /** * @brief Get the broker id to which \p mreq was sent. */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_mock_cgrp.c b/lib/librdkafka-2.4.0/src/rdkafka_mock_cgrp.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_mock_cgrp.c rename to lib/librdkafka-2.4.0/src/rdkafka_mock_cgrp.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_mock_handlers.c b/lib/librdkafka-2.4.0/src/rdkafka_mock_handlers.c similarity index 96% rename from lib/librdkafka-2.3.0/src/rdkafka_mock_handlers.c rename to lib/librdkafka-2.4.0/src/rdkafka_mock_handlers.c index 047f890f5ef..9fd5667ce60 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_mock_handlers.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_mock_handlers.c @@ -759,10 +759,10 @@ static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn, /* FIXME: also check that partitions are assigned to member */ } - rd_kafka_buf_read_i32(rkbuf, &TopicsCnt); + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); /* Response: #Topics */ - rd_kafka_buf_write_i32(resp, TopicsCnt); + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); while (TopicsCnt-- > 0) { rd_kafkap_str_t Topic; @@ -770,14 +770,15 @@ static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn, rd_kafka_mock_topic_t *mtopic; rd_kafka_buf_read_str(rkbuf, &Topic); - rd_kafka_buf_read_i32(rkbuf, &PartitionCnt); + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, + RD_KAFKAP_PARTITIONS_MAX); mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); /* Response: Topic */ rd_kafka_buf_write_kstr(resp, &Topic); /* Response: #Partitions */ - rd_kafka_buf_write_i32(resp, PartitionCnt); + rd_kafka_buf_write_arraycnt(resp, PartitionCnt); while (PartitionCnt-- > 0) { int32_t Partition; @@ -817,6 +818,7 @@ static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn, } rd_kafka_buf_read_str(rkbuf, &Metadata); + rd_kafka_buf_skip_tags(rkbuf); if (!err) rd_kafka_mock_commit_offset(mpart, &GroupId, @@ -825,7 +827,10 @@ static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn, /* Response: ErrorCode */ rd_kafka_buf_write_i16(resp, err); + rd_kafka_buf_write_tags(resp); } + rd_kafka_buf_skip_tags(rkbuf); + rd_kafka_buf_write_tags(resp); } rd_kafka_mock_connection_send_response(mconn, resp); @@ -852,7 +857,8 @@ static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn, * @param mtopic may be NULL */ static void -rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp, +rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_buf_t *resp, int16_t ApiVersion, const char *topic, const rd_kafka_mock_topic_t *mtopic, @@ -875,20 +881,46 @@ rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp, rd_kafka_buf_write_arraycnt(resp, partition_cnt); for (i = 0; mtopic && i < partition_cnt; i++) { - const rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i]; + rd_kafka_mock_partition_leader_t *mpart_leader; + rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i]; int r; /* Response: ..Partitions.ErrorCode */ rd_kafka_buf_write_i16(resp, 0); /* Response: ..Partitions.PartitionIndex */ rd_kafka_buf_write_i32(resp, mpart->id); - /* 
Response: ..Partitions.Leader */ - rd_kafka_buf_write_i32(resp, - mpart->leader ? mpart->leader->id : -1); - if (ApiVersion >= 7) { - /* Response: ..Partitions.LeaderEpoch */ - rd_kafka_buf_write_i32(resp, mpart->leader_epoch); + mpart_leader = + rd_kafka_mock_partition_next_leader_response(mpart); + if (mpart_leader) { + rd_kafka_dbg( + mcluster->rk, MOCK, "MOCK", + "MetadataRequest: using next leader response " + "(%" PRId32 ", %" PRId32 ")", + mpart_leader->leader_id, + mpart_leader->leader_epoch); + + /* Response: ..Partitions.Leader */ + rd_kafka_buf_write_i32(resp, mpart_leader->leader_id); + + if (ApiVersion >= 7) { + /* Response: ..Partitions.LeaderEpoch */ + rd_kafka_buf_write_i32( + resp, mpart_leader->leader_epoch); + } + rd_kafka_mock_partition_leader_destroy(mpart, + mpart_leader); + mpart_leader = NULL; + } else { + /* Response: ..Partitions.Leader */ + rd_kafka_buf_write_i32( + resp, mpart->leader ? mpart->leader->id : -1); + + if (ApiVersion >= 7) { + /* Response: ..Partitions.LeaderEpoch */ + rd_kafka_buf_write_i32(resp, + mpart->leader_epoch); + } } /* Response: ..Partitions.#ReplicaNodes */ @@ -1005,8 +1037,8 @@ static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn, TAILQ_FOREACH(mtopic, &mcluster->topics, link) { rd_kafka_mock_buf_write_Metadata_Topic( - resp, rkbuf->rkbuf_reqhdr.ApiVersion, mtopic->name, - mtopic, mtopic->err); + mcluster, resp, rkbuf->rkbuf_reqhdr.ApiVersion, + mtopic->name, mtopic, mtopic->err); } } else if (requested_topics) { @@ -1028,8 +1060,8 @@ static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn, err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; rd_kafka_mock_buf_write_Metadata_Topic( - resp, rkbuf->rkbuf_reqhdr.ApiVersion, rktpar->topic, - mtopic, err ? err : mtopic->err); + mcluster, resp, rkbuf->rkbuf_reqhdr.ApiVersion, + rktpar->topic, mtopic, err ? err : mtopic->err); } } else { @@ -2128,7 +2160,7 @@ const struct rd_kafka_mock_api_handler [RD_KAFKAP_Fetch] = {0, 11, -1, rd_kafka_mock_handle_Fetch}, [RD_KAFKAP_ListOffsets] = {0, 7, 6, rd_kafka_mock_handle_ListOffsets}, [RD_KAFKAP_OffsetFetch] = {0, 6, 6, rd_kafka_mock_handle_OffsetFetch}, - [RD_KAFKAP_OffsetCommit] = {0, 8, 8, rd_kafka_mock_handle_OffsetCommit}, + [RD_KAFKAP_OffsetCommit] = {0, 9, 8, rd_kafka_mock_handle_OffsetCommit}, [RD_KAFKAP_ApiVersion] = {0, 2, 3, rd_kafka_mock_handle_ApiVersion}, [RD_KAFKAP_Metadata] = {0, 9, 9, rd_kafka_mock_handle_Metadata}, [RD_KAFKAP_FindCoordinator] = {0, 3, 3, diff --git a/lib/librdkafka-2.3.0/src/rdkafka_mock_int.h b/lib/librdkafka-2.4.0/src/rdkafka_mock_int.h similarity index 95% rename from lib/librdkafka-2.3.0/src/rdkafka_mock_int.h rename to lib/librdkafka-2.4.0/src/rdkafka_mock_int.h index 87da2d4e312..ea91363110a 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_mock_int.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_mock_int.h @@ -235,6 +235,16 @@ typedef struct rd_kafka_mock_committed_offset_s { rd_kafkap_str_t *metadata; /**< Metadata, allocated separately */ } rd_kafka_mock_committed_offset_t; +/** + * @struct Leader id and epoch to return in a Metadata call. 
+ */ +typedef struct rd_kafka_mock_partition_leader_s { + /**< Link to prev/next entries */ + TAILQ_ENTRY(rd_kafka_mock_partition_leader_s) link; + int32_t leader_id; /**< Leader id */ + int32_t leader_epoch; /**< Leader epoch */ +} rd_kafka_mock_partition_leader_t; + TAILQ_HEAD(rd_kafka_mock_msgset_tailq_s, rd_kafka_mock_msgset_s); @@ -276,6 +286,10 @@ typedef struct rd_kafka_mock_partition_s { int32_t follower_id; /**< Preferred replica/follower */ struct rd_kafka_mock_topic_s *topic; + + /**< Leader responses */ + TAILQ_HEAD(, rd_kafka_mock_partition_leader_s) + leader_responses; } rd_kafka_mock_partition_t; @@ -285,6 +299,7 @@ typedef struct rd_kafka_mock_partition_s { typedef struct rd_kafka_mock_topic_s { TAILQ_ENTRY(rd_kafka_mock_topic_s) link; char *name; + rd_kafka_Uuid_t id; rd_kafka_mock_partition_t *partitions; int partition_cnt; @@ -434,6 +449,11 @@ rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster, rd_kafka_mock_topic_t * rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster, const rd_kafkap_str_t *kname); + +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find_by_id(const rd_kafka_mock_cluster_t *mcluster, + rd_kafka_Uuid_t id); + rd_kafka_mock_broker_t * rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster, rd_kafka_coordtype_t KeyType, @@ -471,6 +491,13 @@ int64_t rd_kafka_mock_partition_offset_for_leader_epoch( const rd_kafka_mock_partition_t *mpart, int32_t leader_epoch); +rd_kafka_mock_partition_leader_t * +rd_kafka_mock_partition_next_leader_response(rd_kafka_mock_partition_t *mpart); + +void rd_kafka_mock_partition_leader_destroy( + rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_partition_leader_t *mpart_leader); + /** * @returns true if the ApiVersion is supported, else false. diff --git a/lib/librdkafka-2.3.0/src/rdkafka_msg.c b/lib/librdkafka-2.4.0/src/rdkafka_msg.c similarity index 97% rename from lib/librdkafka-2.3.0/src/rdkafka_msg.c rename to lib/librdkafka-2.4.0/src/rdkafka_msg.c index 5e71209dbfc..3fc3967c92d 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_msg.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_msg.c @@ -58,6 +58,15 @@ const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { return rd_kafka_err2str(rkmessage->err); } +const char * +rd_kafka_message_produce_errstr(const rd_kafka_message_t *rkmessage) { + if (!rkmessage->err) + return NULL; + rd_kafka_msg_t *rkm = (rd_kafka_msg_t *)rkmessage; + return rkm->rkm_u.producer.errstr; +} + + /** * @brief Check if producing is allowed. 
@@ -1903,7 +1912,45 @@ void rd_kafka_msgq_verify_order0(const char *function,
         rd_assert(!errcnt);
 }

+rd_kafka_Produce_result_t *rd_kafka_Produce_result_new(int64_t offset,
+                                                       int64_t timestamp) {
+        rd_kafka_Produce_result_t *ret = rd_calloc(1, sizeof(*ret));
+        ret->offset                    = offset;
+        ret->timestamp                 = timestamp;
+        return ret;
+}
+
+void rd_kafka_Produce_result_destroy(rd_kafka_Produce_result_t *result) {
+        if (result->record_errors) {
+                int32_t i;
+                for (i = 0; i < result->record_errors_cnt; i++) {
+                        RD_IF_FREE(result->record_errors[i].errstr, rd_free);
+                }
+                rd_free(result->record_errors);
+        }
+        RD_IF_FREE(result->errstr, rd_free);
+        rd_free(result);
+}
+
+rd_kafka_Produce_result_t *
+rd_kafka_Produce_result_copy(const rd_kafka_Produce_result_t *result) {
+        rd_kafka_Produce_result_t *ret = rd_calloc(1, sizeof(*ret));
+        *ret                           = *result;
+        if (result->errstr)
+                ret->errstr = rd_strdup(result->errstr);
+        if (result->record_errors) {
+                ret->record_errors = rd_calloc(result->record_errors_cnt,
+                                               sizeof(*result->record_errors));
+                int32_t i;
+                for (i = 0; i < result->record_errors_cnt; i++) {
+                        ret->record_errors[i] = result->record_errors[i];
+                        if (result->record_errors[i].errstr)
+                                ret->record_errors[i].errstr = rd_strdup(
+                                    result->record_errors[i].errstr);
+                }
+        }
+        return ret;
+}

 /**
 * @name Unit tests
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_msg.h b/lib/librdkafka-2.4.0/src/rdkafka_msg.h
similarity index 94%
rename from lib/librdkafka-2.3.0/src/rdkafka_msg.h
rename to lib/librdkafka-2.4.0/src/rdkafka_msg.h
index db09892d57b..663aa005d6c 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_msg.h
+++ b/lib/librdkafka-2.4.0/src/rdkafka_msg.h
@@ -65,6 +65,26 @@
 #define RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL (1 << 4)
 #define RD_KAFKA_MSGSET_V2_ATTR_CONTROL (1 << 5)

+/**
+ * @struct Error data for a batch index that caused the batch to be dropped.
+ */
+typedef struct rd_kafka_Produce_result_record_error {
+        int64_t batch_index; /**< Batch index */
+        char *errstr;        /**< Error message for batch_index */
+} rd_kafka_Produce_result_record_error_t;
+
+/**
+ * @struct Result and return values from ProduceResponse
+ */
+typedef struct rd_kafka_Produce_result {
+        int64_t offset;    /**< Assigned offset of first message */
+        int64_t timestamp; /**< (Possibly assigned) timestamp of first
+                            *   message */
+        char *errstr;      /**< Common error message */
+        rd_kafka_Produce_result_record_error_t
+            *record_errors; /**< Errors for records that caused the batch to be
+                             *   dropped */
+        int32_t record_errors_cnt; /**< record_errors count */
+} rd_kafka_Produce_result_t;
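A lifecycle sketch for the new Produce result object (values illustrative); the copy is deep, so both objects must be destroyed independently:

        rd_kafka_Produce_result_t *res = rd_kafka_Produce_result_new(
            42 /*offset*/, 1717500000000 /*timestamp*/);
        rd_kafka_Produce_result_t *dup = rd_kafka_Produce_result_copy(res);
        rd_kafka_Produce_result_destroy(res);
        rd_kafka_Produce_result_destroy(dup); /* deep copy: safe to free both */

 typedef struct rd_kafka_msg_s {
         rd_kafka_message_t rkm_rkmessage; /* MUST be first field */
@@ -122,6 +142,7 @@ typedef struct rd_kafka_msg_s {
                  * identically reconstructed.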
*/ int retries; /* Number of retries so far */ + const char *errstr; /* Error string for this message */ } producer; #define rkm_ts_timeout rkm_u.producer.ts_timeout #define rkm_ts_enq rkm_u.producer.ts_enq @@ -576,6 +597,16 @@ static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap(int64_t seq) { void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq); +rd_kafka_Produce_result_t *rd_kafka_Produce_result_new(int64_t offset, + int64_t timestamp); + +void rd_kafka_Produce_result_destroy(rd_kafka_Produce_result_t *result); + +rd_kafka_Produce_result_t * +rd_kafka_Produce_result_copy(const rd_kafka_Produce_result_t *result); + +/* Unit tests */ + rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize); void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq); int unittest_msg(void); diff --git a/lib/librdkafka-2.3.0/src/rdkafka_msgbatch.h b/lib/librdkafka-2.4.0/src/rdkafka_msgbatch.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_msgbatch.h rename to lib/librdkafka-2.4.0/src/rdkafka_msgbatch.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_msgset.h b/lib/librdkafka-2.4.0/src/rdkafka_msgset.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_msgset.h rename to lib/librdkafka-2.4.0/src/rdkafka_msgset.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_msgset_reader.c b/lib/librdkafka-2.4.0/src/rdkafka_msgset_reader.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_msgset_reader.c rename to lib/librdkafka-2.4.0/src/rdkafka_msgset_reader.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_msgset_writer.c b/lib/librdkafka-2.4.0/src/rdkafka_msgset_writer.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_msgset_writer.c rename to lib/librdkafka-2.4.0/src/rdkafka_msgset_writer.c index 21f16b5a81a..3a5f8b344d6 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_msgset_writer.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_msgset_writer.c @@ -45,7 +45,7 @@ /** @brief The maxium ProduceRequestion ApiVersion supported by librdkafka */ -static const int16_t rd_kafka_ProduceRequest_max_version = 7; +static const int16_t rd_kafka_ProduceRequest_max_version = 8; typedef struct rd_kafka_msgset_writer_s { @@ -267,6 +267,7 @@ static void rd_kafka_msgset_writer_alloc_buf(rd_kafka_msgset_writer_t *msetw) { * ProduceRequest header sizes */ switch (msetw->msetw_ApiVersion) { + case 8: case 7: case 6: case 5: diff --git a/lib/librdkafka-2.3.0/src/rdkafka_offset.c b/lib/librdkafka-2.4.0/src/rdkafka_offset.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_offset.c rename to lib/librdkafka-2.4.0/src/rdkafka_offset.c index 701a41613d3..3da38117ac4 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_offset.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_offset.c @@ -380,8 +380,6 @@ rd_kafka_commit0(rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; } - - /** * NOTE: 'offsets' may be NULL, see official documentation. 
 */
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_offset.h b/lib/librdkafka-2.4.0/src/rdkafka_offset.h
similarity index 100%
rename from lib/librdkafka-2.3.0/src/rdkafka_offset.h
rename to lib/librdkafka-2.4.0/src/rdkafka_offset.h
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_op.c b/lib/librdkafka-2.4.0/src/rdkafka_op.c
similarity index 99%
rename from lib/librdkafka-2.3.0/src/rdkafka_op.c
rename to lib/librdkafka-2.4.0/src/rdkafka_op.c
index 34e9e3fd344..2fe3a4ac51b 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_op.c
+++ b/lib/librdkafka-2.4.0/src/rdkafka_op.c
@@ -387,6 +387,8 @@ void rd_kafka_op_destroy(rd_kafka_op_t *rko) {

                 if (rko->rko_u.dr.rkt)
                         rd_kafka_topic_destroy0(rko->rko_u.dr.rkt);
+                if (rko->rko_u.dr.presult)
+                        rd_kafka_Produce_result_destroy(rko->rko_u.dr.presult);
                 break;

         case RD_KAFKA_OP_OFFSET_RESET:
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_op.h b/lib/librdkafka-2.4.0/src/rdkafka_op.h
similarity index 97%
rename from lib/librdkafka-2.3.0/src/rdkafka_op.h
rename to lib/librdkafka-2.4.0/src/rdkafka_op.h
index 3a1384362ad..8337586d585 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_op.h
+++ b/lib/librdkafka-2.4.0/src/rdkafka_op.h
@@ -399,6 +399,7 @@ struct rd_kafka_op_s {
                         rd_kafka_msgq_t msgq;
                         rd_kafka_msgq_t msgq2;
                         int do_purge2;
+                        rd_kafka_Produce_result_t *presult;
                 } dr;

                 struct {
@@ -564,6 +565,7 @@ struct rd_kafka_op_s {
                                 RD_KAFKA_MOCK_CMD_PART_SET_LEADER,
                                 RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER,
                                 RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS,
+                                RD_KAFKA_MOCK_CMD_PART_PUSH_LEADER_RESPONSE,
                                 RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN,
                                 RD_KAFKA_MOCK_CMD_BROKER_SET_RTT,
                                 RD_KAFKA_MOCK_CMD_BROKER_SET_RACK,
@@ -579,7 +581,9 @@ struct rd_kafka_op_s {
                                          * PART_SET_FOLLOWER
                                          * PART_SET_FOLLOWER_WMARKS
                                          * BROKER_SET_RACK
-                                         * COORD_SET (key_type) */
+                                         * COORD_SET (key_type)
+                                         * PART_PUSH_LEADER_RESPONSE
+                                         */
                        char *str;       /**< For:
                                          * COORD_SET (key) */
                        int32_t partition; /**< For:
@@ -587,6 +591,7 @@ struct rd_kafka_op_s {
                                            * PART_SET_FOLLOWER_WMARKS
                                            * PART_SET_LEADER
                                            * APIVERSION_SET (ApiKey)
+                                           * PART_PUSH_LEADER_RESPONSE
                                            */
                        int32_t broker_id; /**< For:
                                            * PART_SET_FOLLOWER
@@ -606,6 +611,12 @@ struct rd_kafka_op_s {
                                        * PART_SET_FOLLOWER_WMARKS
                                        * APIVERSION_SET (maxver)
                                        */
+                        int32_t leader_id;    /**< Leader id, for:
+                                               * PART_PUSH_LEADER_RESPONSE
+                                               */
+                        int32_t leader_epoch; /**< Leader epoch, for:
+                                               * PART_PUSH_LEADER_RESPONSE
+                                               */
                 } mock;

                 struct {
diff --git a/lib/librdkafka-2.3.0/src/rdkafka_partition.c b/lib/librdkafka-2.4.0/src/rdkafka_partition.c
similarity index 91%
rename from lib/librdkafka-2.3.0/src/rdkafka_partition.c
rename to lib/librdkafka-2.4.0/src/rdkafka_partition.c
index b175ffbc798..49e6f76e6f2 100644
--- a/lib/librdkafka-2.3.0/src/rdkafka_partition.c
+++ b/lib/librdkafka-2.4.0/src/rdkafka_partition.c
@@ -2299,7 +2299,22 @@ rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp,
                                                     int flag,
                                                     rd_kafka_replyq_t replyq) {
         int32_t version;
-        rd_kafka_op_t *rko;
+        rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE);
+
+        if (!pause) {
+                /* If the partition isn't paused, avoid bumping its version,
+                 * as it'll result in resuming fetches from a stale
+                 * next_fetch_start */
+                rd_bool_t is_paused = rd_false;
+                rd_kafka_toppar_lock(rktp);
+                is_paused = RD_KAFKA_TOPPAR_IS_PAUSED(rktp);
+                rd_kafka_toppar_unlock(rktp);
+                if (!is_paused) {
+                        rko->rko_replyq = replyq;
+                        rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
+                        return RD_KAFKA_RESP_ERR_NO_ERROR;
+                }
+        }
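With the early return above, resuming a partition that was never paused no longer bumps the fetch version; from the application side a redundant resume is harmless (sketch using the public API):

        rd_kafka_topic_partition_list_t *parts =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
        rd_kafka_resume_partitions(rk, parts); /* no-op if not paused */
        rd_kafka_topic_partition_list_destroy(parts);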
*/ version = rd_kafka_toppar_version_new_barrier(rktp); @@ -2310,7 +2325,6 @@ rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, version); - rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE); rko->rko_version = version; rko->rko_u.pause.pause = pause; rko->rko_u.pause.flag = flag; @@ -2568,7 +2582,17 @@ rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size) { return rktparlist; } +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_new_with_topic_id(rd_kafka_Uuid_t topic_id, + int32_t partition) { + rd_kafka_topic_partition_private_t *parpriv; + rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); + rktpar->partition = partition; + parpriv = rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; + return rktpar; +} rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic, int32_t partition) { @@ -2613,9 +2637,15 @@ rd_kafka_topic_partition_update(rd_kafka_topic_partition_t *dst, dstpriv->leader_epoch = srcpriv->leader_epoch; + dstpriv->current_leader_epoch = srcpriv->current_leader_epoch; + + dstpriv->topic_id = srcpriv->topic_id; + } else if ((dstpriv = dst->_private)) { - /* No private object in source, reset the leader epoch. */ - dstpriv->leader_epoch = -1; + /* No private object in source, reset the fields. */ + dstpriv->leader_epoch = -1; + dstpriv->current_leader_epoch = -1; + dstpriv->topic_id = RD_KAFKA_UUID_ZERO; } } @@ -2707,6 +2737,35 @@ int32_t rd_kafka_topic_partition_get_current_leader_epoch( return parpriv->current_leader_epoch; } +/** + * @brief Sets topic id for partition \p rktpar. + * + * @param rktpar Topic partition. + * @param topic_id Topic id to set. + */ +void rd_kafka_topic_partition_set_topic_id(rd_kafka_topic_partition_t *rktpar, + rd_kafka_Uuid_t topic_id) { + rd_kafka_topic_partition_private_t *parpriv; + parpriv = rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; +} + +/** + * @brief Gets topic id from topic-partition \p rktpar. + * + * @param rktpar Topic partition. + * @return Topic id, or RD_KAFKA_UUID_ZERO. 
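A quick round-trip through the new topic id accessors — the setter above and the getter defined just below — as a sketch assuming only functions introduced by this patch plus the public partition destructor:

    rd_kafka_Uuid_t id = rd_kafka_Uuid_random();
    rd_kafka_topic_partition_t *rktpar =
        rd_kafka_topic_partition_new_with_topic_id(id, 0 /* partition */);
    /* The getter falls back to RD_KAFKA_UUID_ZERO when no private
     * object exists; here one was created, so the id round-trips: */
    rd_assert(!rd_kafka_Uuid_cmp(
        id, rd_kafka_topic_partition_get_topic_id(rktpar)));
    rd_kafka_topic_partition_destroy(rktpar);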
+ */ +rd_kafka_Uuid_t rd_kafka_topic_partition_get_topic_id( + const rd_kafka_topic_partition_t *rktpar) { + const rd_kafka_topic_partition_private_t *parpriv; + + if (!(parpriv = rktpar->_private)) + return RD_KAFKA_UUID_ZERO; + + return parpriv->topic_id; +} + void rd_kafka_topic_partition_set_current_leader_epoch( rd_kafka_topic_partition_t *rktpar, int32_t current_leader_epoch) { @@ -2823,7 +2882,8 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0( rktpar = &rktparlist->elems[rktparlist->cnt++]; memset(rktpar, 0, sizeof(*rktpar)); - rktpar->topic = rd_strdup(topic); + if (topic) + rktpar->topic = rd_strdup(topic); rktpar->partition = partition; rktpar->offset = RD_KAFKA_OFFSET_INVALID; @@ -2834,8 +2894,10 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0( parpriv_copy->rktp = rd_kafka_toppar_keep_fl(func, line, parpriv->rktp); } - parpriv_copy->leader_epoch = parpriv->leader_epoch; - parpriv_copy->current_leader_epoch = parpriv->leader_epoch; + parpriv_copy->leader_epoch = parpriv->leader_epoch; + parpriv_copy->current_leader_epoch = + parpriv->current_leader_epoch; + parpriv_copy->topic_id = parpriv->topic_id; } else if (rktp) { rd_kafka_topic_partition_private_t *parpriv_copy = rd_kafka_topic_partition_get_private(rktpar); @@ -2855,6 +2917,36 @@ rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, } +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_with_topic_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition) { + rd_kafka_topic_partition_t *rktpar; + rktpar = rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, rktparlist, NULL, partition, NULL, NULL); + rd_kafka_topic_partition_private_t *parpriv = + rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; + return rktpar; +} + + +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_list_add_with_topic_name_and_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + const char *topic, + int32_t partition) { + rd_kafka_topic_partition_t *rktpar; + rktpar = rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL); + rd_kafka_topic_partition_private_t *parpriv = + rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; + return rktpar; +} + + /** * Adds a consecutive list of partitions to a list */ @@ -2886,8 +2978,12 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert( /** * @brief Creates a copy of \p rktpar and adds it to \p rktparlist + * + * @return Copy of passed partition that was added to the list + * + * @remark Ownership of returned partition remains of the list. */ -void rd_kafka_topic_partition_list_add_copy( +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_copy( rd_kafka_topic_partition_list_t *rktparlist, const rd_kafka_topic_partition_t *rktpar) { rd_kafka_topic_partition_t *dst; @@ -2896,6 +2992,7 @@ void rd_kafka_topic_partition_list_add_copy( __FUNCTION__, __LINE__, rktparlist, rktpar->topic, rktpar->partition, NULL, rktpar->_private); rd_kafka_topic_partition_update(dst, rktpar); + return dst; } @@ -3013,6 +3110,25 @@ int rd_kafka_topic_partition_cmp(const void *_a, const void *_b) { return RD_CMP(a->partition, b->partition); } +/** + * @brief Compare topic partitions \p a and \p b by topic id first + * and then by partition. 
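The two list helpers above mirror rd_kafka_topic_partition_list_add() for id-keyed entries. A hypothetical construction ('topic_id' assumed to be a known rd_kafka_Uuid_t, "orders" an illustrative topic name):

    rd_kafka_topic_partition_list_t *parts =
        rd_kafka_topic_partition_list_new(2);
    /* Id only: rktpar->topic stays NULL, which list_add0() now allows. */
    rd_kafka_topic_partition_list_add_with_topic_id(parts, topic_id, 0);
    /* Id plus name, for callers that want both, e.g. for logging. */
    rd_kafka_topic_partition_list_add_with_topic_name_and_id(
        parts, topic_id, "orders", 1);
    rd_kafka_topic_partition_list_destroy(parts);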
+ */ +int rd_kafka_topic_partition_by_id_cmp(const void *_a, const void *_b) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_topic_partition_t *b = _b; + rd_kafka_Uuid_t topic_id_a = rd_kafka_topic_partition_get_topic_id(a); + rd_kafka_Uuid_t topic_id_b = rd_kafka_topic_partition_get_topic_id(b); + int are_topic_ids_different = rd_kafka_Uuid_cmp(topic_id_a, topic_id_b); + return are_topic_ids_different || RD_CMP(a->partition, b->partition); +} + +static int rd_kafka_topic_partition_by_id_cmp_opaque(const void *_a, + const void *_b, + void *opaque) { + return rd_kafka_topic_partition_by_id_cmp(_a, _b); +} + /** @brief Compare only the topic */ int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) { const rd_kafka_topic_partition_t *a = _a; @@ -3020,19 +3136,36 @@ int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) { return strcmp(a->topic, b->topic); } +/** @brief Compare only the topic id */ +int rd_kafka_topic_partition_cmp_topic_id(const void *_a, const void *_b) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_topic_partition_t *b = _b; + return rd_kafka_Uuid_cmp(rd_kafka_topic_partition_get_topic_id(a), + rd_kafka_topic_partition_get_topic_id(b)); +} + static int rd_kafka_topic_partition_cmp_opaque(const void *_a, const void *_b, void *opaque) { return rd_kafka_topic_partition_cmp(_a, _b); } -/** @returns a hash of the topic and partition */ +/** @returns a hash of the topic name and partition */ unsigned int rd_kafka_topic_partition_hash(const void *_a) { const rd_kafka_topic_partition_t *a = _a; int r = 31 * 17 + a->partition; return 31 * r + rd_string_hash(a->topic, -1); } +/** @returns a hash of the topic id and partition */ +unsigned int rd_kafka_topic_partition_hash_by_id(const void *_a) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id(a); + int r = 31 * 17 + a->partition; + return 31 * r + rd_kafka_Uuid_hash(&topic_id); +} + /** @@ -3058,6 +3191,31 @@ static int rd_kafka_topic_partition_list_find0( return -1; } +/** + * @brief Search 'rktparlist' for \p topic_id and \p partition with comparator + * \p cmp. + * @returns the elems[] index or -1 on miss. + */ +static int rd_kafka_topic_partition_list_find_by_id0( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition, + int (*cmp)(const void *, const void *)) { + int i, ret = -1; + rd_kafka_topic_partition_t *rktpar = + rd_kafka_topic_partition_new_with_topic_id(topic_id, partition); + + for (i = 0; i < rktparlist->cnt; i++) { + if (!cmp(rktpar, &rktparlist->elems[i])) { + ret = i; + break; + } + } + + rd_kafka_topic_partition_destroy(rktpar); + return ret; +} + rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, @@ -3070,6 +3228,22 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( return &rktparlist->elems[i]; } +/** + * @brief Search 'rktparlist' for 'topic_id' and 'partition'. + * @returns Found topic partition or NULL. 
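The id-keyed hash above uses the classic Java-style 31-based combiner. A standalone, compilable illustration of the same folding, independent of librdkafka types:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold fields into one hash: seed 17, multiplier 31, as in
     * rd_kafka_topic_partition_hash_by_id() above, which combines the
     * partition number with the topic id hash. */
    static unsigned int combine_hash(int32_t partition, unsigned int id_hash) {
            unsigned int r = 31 * 17 + (unsigned int)partition;
            return 31 * r + id_hash;
    }

    int main(void) {
            printf("%u\n", combine_hash(3, 0xdeadbeefu));
            return 0;
    }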
+ */ +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition) { + int i = rd_kafka_topic_partition_list_find_by_id0( + rktparlist, topic_id, partition, + rd_kafka_topic_partition_by_id_cmp); + if (i == -1) + return NULL; + else + return &rktparlist->elems[i]; +} int rd_kafka_topic_partition_list_find_idx( const rd_kafka_topic_partition_list_t *rktparlist, @@ -3079,11 +3253,24 @@ int rd_kafka_topic_partition_list_find_idx( rktparlist, topic, partition, rd_kafka_topic_partition_cmp); } +/** + * @brief Search 'rktparlist' for \p topic_id and \p partition. + * @returns the elems[] index or -1 on miss. + */ +int rd_kafka_topic_partition_list_find_idx_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition) { + return rd_kafka_topic_partition_list_find_by_id0( + rktparlist, topic_id, partition, + rd_kafka_topic_partition_by_id_cmp); +} + /** * @returns the first element that matches \p topic, regardless of partition. */ -rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic( +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_name( const rd_kafka_topic_partition_list_t *rktparlist, const char *topic) { int i = rd_kafka_topic_partition_list_find0( @@ -3095,6 +3282,21 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic( return &rktparlist->elems[i]; } +/** + * @returns the first element that matches \p topic_id, regardless of partition. + */ +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + const rd_kafka_Uuid_t topic_id) { + int i = rd_kafka_topic_partition_list_find_by_id0( + rktparlist, topic_id, RD_KAFKA_PARTITION_UA, + rd_kafka_topic_partition_cmp_topic_id); + if (i == -1) + return NULL; + else + return &rktparlist->elems[i]; +} + int rd_kafka_topic_partition_list_del_by_idx( rd_kafka_topic_partition_list_t *rktparlist, @@ -3185,6 +3387,12 @@ void rd_kafka_topic_partition_list_sort_by_topic( rktparlist, rd_kafka_topic_partition_cmp_opaque, NULL); } +void rd_kafka_topic_partition_list_sort_by_topic_id( + rd_kafka_topic_partition_list_t *rktparlist) { + rd_kafka_topic_partition_list_sort( + rktparlist, rd_kafka_topic_partition_by_id_cmp_opaque, NULL); +} + rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( rd_kafka_topic_partition_list_t *rktparlist, const char *topic, @@ -3940,11 +4148,16 @@ const char *rd_kafka_topic_partition_list_str( int i; size_t of = 0; + if (!rktparlist->cnt) + dest[0] = '\0'; for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; char errstr[128]; char offsetstr[32]; + const char *topic_id_str = NULL; + const rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id(rktpar); int r; if (!rktpar->err && (fmt_flags & RD_KAFKA_FMT_F_ONLY_ERR)) @@ -3962,14 +4175,19 @@ const char *rd_kafka_topic_partition_list_str( else offsetstr[0] = '\0'; + + if (!RD_KAFKA_UUID_IS_ZERO(topic_id)) + topic_id_str = rd_kafka_Uuid_base64str(&topic_id); + r = rd_snprintf(&dest[of], dest_size - of, "%s" - "%s[%" PRId32 + "%s(%s)[%" PRId32 "]" "%s" "%s", of == 0 ? 
"" : ", ", rktpar->topic, - rktpar->partition, offsetstr, errstr); + topic_id_str, rktpar->partition, offsetstr, + errstr); if ((size_t)r >= dest_size - of) { rd_snprintf(&dest[dest_size - 4], 4, "..."); @@ -4029,6 +4247,8 @@ void rd_kafka_topic_partition_list_update( s_priv = rd_kafka_topic_partition_get_private(s); d_priv = rd_kafka_topic_partition_get_private(d); d_priv->leader_epoch = s_priv->leader_epoch; + d_priv->current_leader_epoch = s_priv->current_leader_epoch; + d_priv->topic_id = s_priv->topic_id; } } @@ -4349,3 +4569,161 @@ const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos) { return ret[idx]; } + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + void *) map_toppar_void_t; + +/** + * @brief Calculates \p a ∩ \p b using \p cmp and \p hash . + * Ordered following \p a order. Elements are copied from \p a. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection0( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b, + int(cmp)(const void *_a, const void *_b), + unsigned int(hash)(const void *_a)) { + rd_kafka_topic_partition_t *rktpar; + rd_kafka_topic_partition_list_t *ret = + rd_kafka_topic_partition_list_new(a->cnt < b->cnt ? a->cnt + : b->cnt); + map_toppar_void_t b_map = + RD_MAP_INITIALIZER(b->cnt, cmp, hash, NULL, NULL); + RD_KAFKA_TPLIST_FOREACH(rktpar, b) { + RD_MAP_SET(&b_map, rktpar, rktpar); + } + RD_KAFKA_TPLIST_FOREACH(rktpar, a) { + if ((RD_MAP_GET(&b_map, rktpar) != NULL) == 1) { + rd_kafka_topic_partition_list_add_copy(ret, rktpar); + } + } + RD_MAP_DESTROY(&b_map); + return ret; +} + +/** + * @brief Calculates \p a - \p b using \p cmp and \p hash . + * Ordered following \p a order. Elements are copied from \p a. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_difference0(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b, + int(cmp)(const void *_a, + const void *_b), + unsigned int(hash)(const void *_a)) { + rd_kafka_topic_partition_t *rktpar; + rd_kafka_topic_partition_list_t *ret = + rd_kafka_topic_partition_list_new(a->cnt); + map_toppar_void_t b_map = + RD_MAP_INITIALIZER(b->cnt, cmp, hash, NULL, NULL); + RD_KAFKA_TPLIST_FOREACH(rktpar, b) { + RD_MAP_SET(&b_map, rktpar, rktpar); + } + RD_KAFKA_TPLIST_FOREACH(rktpar, a) { + if ((RD_MAP_GET(&b_map, rktpar) != NULL) == 0) { + rd_kafka_topic_partition_list_add_copy(ret, rktpar); + } + } + RD_MAP_DESTROY(&b_map); + return ret; +} + +/** + * @brief Calculates \p a ∪ \p b using \p cmp and \p hash . + * Ordered following \p a order for elements in \p a + * and \p b order for elements only in \p b. + * Elements are copied the same way. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union0(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b, + int(cmp)(const void *_a, const void *_b), + unsigned int(hash)(const void *_a)) { + + rd_kafka_topic_partition_list_t *b_minus_a = + rd_kafka_topic_partition_list_difference0(b, a, cmp, hash); + rd_kafka_topic_partition_list_t *ret = + rd_kafka_topic_partition_list_new(a->cnt + b_minus_a->cnt); + + rd_kafka_topic_partition_list_add_list(ret, a); + rd_kafka_topic_partition_list_add_list(ret, b_minus_a); + + rd_kafka_topic_partition_list_destroy(b_minus_a); + return ret; +} + +/** + * @brief Calculates \p a ∩ \p b using topic name and partition id. + * Ordered following \p a order. Elements are copied from \p a. 
+ */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_intersection0( + a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash); +} + +/** + * @brief Calculates \p a - \p b using topic name and partition id. + * Ordered following \p a order. Elements are copied from \p a. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_difference_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_difference0( + a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash); +} + +/** + * @brief Calculates \p a ∪ \p b using topic name and partition id. + * Ordered following \p a order for elements in \p a + * and \p b order for elements only in \p b. + * Elements are copied the same way. + */ +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_union_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_union0( + a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash); +} + +/** + * @brief Calculates \p a ∩ \p b using topic id and partition id. + * Ordered following \p a order. Elements are copied from \p a. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_intersection0( + a, b, rd_kafka_topic_partition_by_id_cmp, + rd_kafka_topic_partition_hash_by_id); +} + +/** + * @brief Calculates \p a - \p b using topic id and partition id. + * Ordered following \p a order. Elements are copied from \p a. + */ +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_difference_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_difference0( + a, b, rd_kafka_topic_partition_by_id_cmp, + rd_kafka_topic_partition_hash_by_id); +} + +/** + * @brief Calculates \p a ∪ \p b using topic id and partition id. + * Ordered following \p a order for elements in \p a + * and \p b order for elements only in \p b. + * Elements are copied the same way. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union_by_id(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_union0( + a, b, rd_kafka_topic_partition_by_id_cmp, + rd_kafka_topic_partition_hash_by_id); +} diff --git a/lib/librdkafka-2.3.0/src/rdkafka_partition.h b/lib/librdkafka-2.4.0/src/rdkafka_partition.h similarity index 93% rename from lib/librdkafka-2.3.0/src/rdkafka_partition.h rename to lib/librdkafka-2.4.0/src/rdkafka_partition.h index 638c86eb352..b74daf8e2f5 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_partition.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_partition.h @@ -495,6 +495,8 @@ typedef struct rd_kafka_topic_partition_private_s { int32_t current_leader_epoch; /** Leader epoch if known, else -1. */ int32_t leader_epoch; + /** Topic id. 
*/ + rd_kafka_Uuid_t topic_id; } rd_kafka_topic_partition_private_t; @@ -680,6 +682,13 @@ void *rd_kafka_topic_partition_copy_void(const void *src); void rd_kafka_topic_partition_destroy_free(void *ptr); rd_kafka_topic_partition_t * rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp); +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_new_with_topic_id(rd_kafka_Uuid_t topic_id, + int32_t partition); +void rd_kafka_topic_partition_set_topic_id(rd_kafka_topic_partition_t *rktpar, + rd_kafka_Uuid_t topic_id); +rd_kafka_Uuid_t +rd_kafka_topic_partition_get_topic_id(const rd_kafka_topic_partition_t *rktpar); void rd_kafka_topic_partition_list_init( rd_kafka_topic_partition_list_t *rktparlist, @@ -698,12 +707,24 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0( rd_kafka_toppar_t *rktp, const rd_kafka_topic_partition_private_t *parpriv); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_with_topic_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition); + +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_list_add_with_topic_name_and_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + const char *topic, + int32_t partition); + rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert( rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition); -void rd_kafka_topic_partition_list_add_copy( +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_copy( rd_kafka_topic_partition_list_t *rktparlist, const rd_kafka_topic_partition_t *rktpar); @@ -739,19 +760,38 @@ int rd_kafka_topic_partition_match(rd_kafka_t *rk, int rd_kafka_topic_partition_cmp(const void *_a, const void *_b); +int rd_kafka_topic_partition_by_id_cmp(const void *_a, const void *_b); unsigned int rd_kafka_topic_partition_hash(const void *a); int rd_kafka_topic_partition_list_find_idx( const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition); -rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic( + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition); + +int rd_kafka_topic_partition_list_find_idx_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition); + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_name( const rd_kafka_topic_partition_list_t *rktparlist, const char *topic); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id); + void rd_kafka_topic_partition_list_sort_by_topic( rd_kafka_topic_partition_list_t *rktparlist); +void rd_kafka_topic_partition_list_sort_by_topic_id( + rd_kafka_topic_partition_list_t *rktparlist); + void rd_kafka_topic_partition_list_reset_offsets( rd_kafka_topic_partition_list_t *rktparlist, int64_t offset); @@ -770,6 +810,20 @@ int rd_kafka_topic_partition_list_cmp(const void *_a, const void *_b, int (*cmp)(const void *, const void *)); +/** + * Creates a new empty topic partition private. + * + * @remark This struct is dynamically allocated and hence should be freed. 
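Note the -1 defaults in the allocator defined just below: both epoch fields start at -1 ("unknown") rather than 0, which is a syntactically valid epoch. The visible effect, sketched with the public epoch getter (which reads the same private struct):

    rd_kafka_topic_partition_t *p =
        rd_kafka_topic_partition_new("topic", 0);
    /* No epoch has been set, so the sentinel shows through: */
    rd_assert(rd_kafka_topic_partition_get_leader_epoch(p) == -1);
    rd_kafka_topic_partition_destroy(p);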
+ */ +static RD_UNUSED RD_INLINE rd_kafka_topic_partition_private_t * +rd_kafka_topic_partition_private_new() { + rd_kafka_topic_partition_private_t *parpriv; + parpriv = rd_calloc(1, sizeof(*parpriv)); + parpriv->leader_epoch = -1; + parpriv->current_leader_epoch = -1; + return parpriv; +} + /** * @returns (and creates if necessary) the ._private glue object. */ @@ -778,9 +832,8 @@ rd_kafka_topic_partition_get_private(rd_kafka_topic_partition_t *rktpar) { rd_kafka_topic_partition_private_t *parpriv; if (!(parpriv = rktpar->_private)) { - parpriv = rd_calloc(1, sizeof(*parpriv)); - parpriv->leader_epoch = -1; - rktpar->_private = parpriv; + parpriv = rd_kafka_topic_partition_private_new(); + rktpar->_private = parpriv; } return parpriv; @@ -811,7 +864,6 @@ void rd_kafka_topic_partition_set_current_leader_epoch( rd_kafka_topic_partition_t *rktpar, int32_t leader_epoch); - /** * @returns the partition's rktp if set (no refcnt increase), else NULL. */ @@ -1089,4 +1141,31 @@ static RD_UNUSED RD_INLINE void rd_kafka_toppar_set_offset_validation_position( rktp->rktp_offset_validation_pos = offset_validation_pos; } +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_difference_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union_by_name(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_difference_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union_by_id(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + #endif /* _RDKAFKA_PARTITION_H_ */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_pattern.c b/lib/librdkafka-2.4.0/src/rdkafka_pattern.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_pattern.c rename to lib/librdkafka-2.4.0/src/rdkafka_pattern.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_pattern.h b/lib/librdkafka-2.4.0/src/rdkafka_pattern.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_pattern.h rename to lib/librdkafka-2.4.0/src/rdkafka_pattern.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_plugin.c b/lib/librdkafka-2.4.0/src/rdkafka_plugin.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_plugin.c rename to lib/librdkafka-2.4.0/src/rdkafka_plugin.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_plugin.h b/lib/librdkafka-2.4.0/src/rdkafka_plugin.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_plugin.h rename to lib/librdkafka-2.4.0/src/rdkafka_plugin.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_proto.h b/lib/librdkafka-2.4.0/src/rdkafka_proto.h similarity index 88% rename from lib/librdkafka-2.3.0/src/rdkafka_proto.h rename to lib/librdkafka-2.4.0/src/rdkafka_proto.h index e6caf509e30..686e9c7b629 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_proto.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_proto.h @@ -156,21 +156,22 @@ static RD_UNUSED const char *rd_kafka_ApiKey2str(int16_t ApiKey) { "DescribeUserScramCredentialsRequest", 
[RD_KAFKAP_AlterUserScramCredentials] = "AlterUserScramCredentialsRequest", - [RD_KAFKAP_Vote] = "VoteRequest", - [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", - [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", - [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", - [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", - [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest", - [RD_KAFKAP_Envelope] = "EnvelopeRequest", - [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot", - [RD_KAFKAP_DescribeCluster] = "DescribeCluster", - [RD_KAFKAP_DescribeProducers] = "DescribeProducers", - [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat", - [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker", - [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions", - [RD_KAFKAP_ListTransactions] = "ListTransactions", - [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds", + [RD_KAFKAP_Vote] = "VoteRequest", + [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", + [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", + [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", + [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", + [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest", + [RD_KAFKAP_Envelope] = "EnvelopeRequest", + [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot", + [RD_KAFKAP_DescribeCluster] = "DescribeCluster", + [RD_KAFKAP_DescribeProducers] = "DescribeProducers", + [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat", + [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker", + [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions", + [RD_KAFKAP_ListTransactions] = "ListTransactions", + [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds", + [RD_KAFKAP_ConsumerGroupHeartbeat] = "ConsumerGroupHeartbeat", }; static RD_TLS char ret[64]; @@ -584,10 +585,60 @@ typedef struct rd_kafka_Uuid_s { } rd_kafka_Uuid_t; #define RD_KAFKA_UUID_ZERO \ - { 0, 0, "" } + (rd_kafka_Uuid_t) { \ + 0, 0, "" \ + } + +#define RD_KAFKA_UUID_IS_ZERO(uuid) \ + (!rd_kafka_Uuid_cmp(uuid, RD_KAFKA_UUID_ZERO)) #define RD_KAFKA_UUID_METADATA_TOPIC_ID \ - { 0, 1, "" } + (rd_kafka_Uuid_t) { \ + 0, 1, "" \ + } + +static RD_INLINE RD_UNUSED int rd_kafka_Uuid_cmp(rd_kafka_Uuid_t a, + rd_kafka_Uuid_t b) { + if (a.most_significant_bits < b.most_significant_bits) + return -1; + if (a.most_significant_bits > b.most_significant_bits) + return 1; + if (a.least_significant_bits < b.least_significant_bits) + return -1; + if (a.least_significant_bits > b.least_significant_bits) + return 1; + return 0; +} + +static RD_INLINE RD_UNUSED int rd_kafka_Uuid_ptr_cmp(void *a, void *b) { + rd_kafka_Uuid_t *a_uuid = a, *b_uuid = b; + return rd_kafka_Uuid_cmp(*a_uuid, *b_uuid); +} + +rd_kafka_Uuid_t rd_kafka_Uuid_random(); + +const char *rd_kafka_Uuid_str(const rd_kafka_Uuid_t *uuid); + +unsigned int rd_kafka_Uuid_hash(const rd_kafka_Uuid_t *uuid); + +unsigned int rd_kafka_Uuid_map_hash(const void *key); + +/** + * @brief UUID copier for rd_list_copy() + */ +static RD_UNUSED void *rd_list_Uuid_copy(const void *elem, void *opaque) { + return (void *)rd_kafka_Uuid_copy((rd_kafka_Uuid_t *)elem); +} + +static RD_INLINE RD_UNUSED void rd_list_Uuid_destroy(void *uuid) { + rd_kafka_Uuid_destroy((rd_kafka_Uuid_t *)uuid); +} + +static RD_INLINE RD_UNUSED int rd_list_Uuid_cmp(const void *uuid1, + const void *uuid2) { + return rd_kafka_Uuid_cmp(*((rd_kafka_Uuid_t *)uuid1), + *((rd_kafka_Uuid_t *)uuid2)); +} /** diff --git a/lib/librdkafka-2.3.0/src/rdkafka_protocol.h b/lib/librdkafka-2.4.0/src/rdkafka_protocol.h similarity index 98% rename from 
lib/librdkafka-2.3.0/src/rdkafka_protocol.h rename to lib/librdkafka-2.4.0/src/rdkafka_protocol.h index 99c6aa16a20..5ca902ddaa7 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_protocol.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_protocol.h @@ -113,8 +113,9 @@ #define RD_KAFKAP_DescribeTransactions 65 #define RD_KAFKAP_ListTransactions 66 #define RD_KAFKAP_AllocateProducerIds 67 +#define RD_KAFKAP_ConsumerGroupHeartbeat 68 -#define RD_KAFKAP__NUM 68 +#define RD_KAFKAP__NUM 69 #endif /* _RDKAFKA_PROTOCOL_H_ */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_queue.c b/lib/librdkafka-2.4.0/src/rdkafka_queue.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_queue.c rename to lib/librdkafka-2.4.0/src/rdkafka_queue.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_queue.h b/lib/librdkafka-2.4.0/src/rdkafka_queue.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_queue.h rename to lib/librdkafka-2.4.0/src/rdkafka_queue.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_range_assignor.c b/lib/librdkafka-2.4.0/src/rdkafka_range_assignor.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_range_assignor.c rename to lib/librdkafka-2.4.0/src/rdkafka_range_assignor.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_request.c b/lib/librdkafka-2.4.0/src/rdkafka_request.c similarity index 88% rename from lib/librdkafka-2.3.0/src/rdkafka_request.c rename to lib/librdkafka-2.4.0/src/rdkafka_request.c index b9e250a9e50..5ac7e0f1944 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_request.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_request.c @@ -144,6 +144,7 @@ int rd_kafka_err_action(rd_kafka_broker_t *rkb, break; case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS: + case RD_KAFKA_RESP_ERR_INVALID_MSG: /* Client-side wait-response/in-queue timeout */ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: actions |= RD_KAFKA_ERR_ACTION_RETRY | @@ -211,12 +212,17 @@ int rd_kafka_err_action(rd_kafka_broker_t *rkb, */ rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( rd_kafka_buf_t *rkbuf, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, size_t estimated_part_cnt, const rd_kafka_topic_partition_field_t *fields) { const int log_decode_errors = LOG_ERR; int32_t TopicArrayCnt; rd_kafka_topic_partition_list_t *parts = NULL; + /* We assume here that the topic partition list is not NULL. + * FIXME: check NULL topic array case, if required in future. 
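One detail worth calling out from the rdkafka_proto.h hunk above: RD_KAFKA_UUID_ZERO now expands to a C99 compound literal, so it can be used in expression position (as a function argument, an assignment source, or inside RD_KAFKA_UUID_IS_ZERO()), which the bare brace initializer could not. A standalone illustration with simplified stand-in types:

    #include <stdio.h>

    typedef struct my_uuid_s {
            long long msb, lsb;
            char base64str[23];
    } my_uuid_t;

    /* Compound literal: a value, usable anywhere a my_uuid_t is. */
    #define MY_UUID_ZERO ((my_uuid_t){0, 0, ""})

    static int my_uuid_is_zero(my_uuid_t u) {
            return u.msb == 0 && u.lsb == 0;
    }

    int main(void) {
            my_uuid_t z = MY_UUID_ZERO; /* expression, not just an initializer */
            printf("%d\n", my_uuid_is_zero(z)); /* prints 1 */
            return 0;
    }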
*/ + rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); parts = rd_kafka_topic_partition_list_new( @@ -225,13 +231,20 @@ rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( while (TopicArrayCnt-- > 0) { rd_kafkap_str_t kTopic; int32_t PartArrayCnt; - char *topic; + char *topic = NULL; + rd_kafka_Uuid_t topic_id; + + if (use_topic_id) { + rd_kafka_buf_read_uuid(rkbuf, &topic_id); + } + if (use_topic_name) { + rd_kafka_buf_read_str(rkbuf, &kTopic); + RD_KAFKAP_STR_DUPA(&topic, &kTopic); + } - rd_kafka_buf_read_str(rkbuf, &kTopic); rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt, RD_KAFKAP_PARTITIONS_MAX); - RD_KAFKAP_STR_DUPA(&topic, &kTopic); while (PartArrayCnt-- > 0) { int32_t Partition = -1, Epoch = -1234, @@ -269,15 +282,31 @@ rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA: rd_assert(!*"metadata not implemented"); break; - case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP: + case RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP: + rd_assert( + !*"timestamp not implemented"); break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP: + /* Fallback */ case RD_KAFKA_TOPIC_PARTITION_FIELD_END: break; } } - rktpar = rd_kafka_topic_partition_list_add(parts, topic, - Partition); + if (use_topic_id) { + rktpar = + rd_kafka_topic_partition_list_add_with_topic_id( + parts, topic_id, Partition); + if (use_topic_name) + rktpar->topic = rd_strdup(topic); + } else if (use_topic_name) { + rktpar = rd_kafka_topic_partition_list_add( + parts, topic, Partition); + } else { + rd_assert(!*"one of use_topic_id and " + "use_topic_name should be true"); + } + /* Use dummy sentinel values that are unlikely to be * seen from the broker to know if we are to set these * fields or not. */ @@ -291,8 +320,8 @@ rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( rktpar, CurrentLeaderEpoch); rktpar->err = ErrorCode; - - rd_kafka_buf_skip_tags(rkbuf); + if (fi > 1) + rd_kafka_buf_skip_tags(rkbuf); } rd_kafka_buf_skip_tags(rkbuf); @@ -313,20 +342,23 @@ rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( * * @returns the number of partitions written to buffer. * - * @remark The \p parts list MUST be sorted. + * @remark The \p parts list MUST be sorted by name if use_topic_id is false or + * by id. 
*/ int rd_kafka_buf_write_topic_partitions( rd_kafka_buf_t *rkbuf, const rd_kafka_topic_partition_list_t *parts, rd_bool_t skip_invalid_offsets, rd_bool_t only_invalid_offsets, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, const rd_kafka_topic_partition_field_t *fields) { size_t of_TopicArrayCnt; size_t of_PartArrayCnt = 0; int TopicArrayCnt = 0, PartArrayCnt = 0; int i; - const char *prev_topic = NULL; - int cnt = 0; + const rd_kafka_topic_partition_t *prev_topic = NULL; + int cnt = 0; rd_assert(!only_invalid_offsets || (only_invalid_offsets != skip_invalid_offsets)); @@ -336,6 +368,7 @@ int rd_kafka_buf_write_topic_partitions( for (i = 0; i < parts->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = &parts->elems[i]; + rd_bool_t different_topics; int fi; if (rktpar->offset < 0) { @@ -344,7 +377,19 @@ int rd_kafka_buf_write_topic_partitions( } else if (only_invalid_offsets) continue; - if (!prev_topic || strcmp(rktpar->topic, prev_topic)) { + if (use_topic_id) { + different_topics = + !prev_topic || + rd_kafka_Uuid_cmp( + rd_kafka_topic_partition_get_topic_id(rktpar), + rd_kafka_topic_partition_get_topic_id( + prev_topic)); + } else { + different_topics = + !prev_topic || + strcmp(rktpar->topic, prev_topic->topic); + } + if (different_topics) { /* Finish previous topic, if any. */ if (of_PartArrayCnt > 0) { rd_kafka_buf_finalize_arraycnt( @@ -355,9 +400,18 @@ int rd_kafka_buf_write_topic_partitions( /* Topic */ - rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); + if (use_topic_name) + rd_kafka_buf_write_str(rkbuf, rktpar->topic, + -1); + if (use_topic_id) { + rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id( + rktpar); + rd_kafka_buf_write_uuid(rkbuf, &topic_id); + } + TopicArrayCnt++; - prev_topic = rktpar->topic; + prev_topic = rktpar; /* New topic so reset partition count */ PartArrayCnt = 0; @@ -395,6 +449,11 @@ int rd_kafka_buf_write_topic_partitions( case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR: rd_kafka_buf_write_i16(rkbuf, rktpar->err); break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP: + /* Current implementation is just + * sending a NULL value */ + rd_kafka_buf_write_i64(rkbuf, -1); + break; case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA: /* Java client 0.9.0 and broker <0.10.0 can't * parse Null metadata fields, so as a @@ -950,7 +1009,8 @@ rd_kafka_resp_err_t rd_kafka_handle_OffsetForLeaderEpoch( : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - *offsets = rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields); + *offsets = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields); if (!*offsets) goto err_parse; @@ -1004,7 +1064,8 @@ void rd_kafka_OffsetForLeaderEpochRequest( RD_KAFKA_TOPIC_PARTITION_FIELD_END}; rd_kafka_buf_write_topic_partitions( rkbuf, parts, rd_false /*include invalid offsets*/, - rd_false /*skip valid offsets */, fields); + rd_false /*skip valid offsets*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -1038,6 +1099,7 @@ rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, rd_bool_t add_part, rd_bool_t allow_retry) { const int log_decode_errors = LOG_ERR; + int32_t GroupArrayCnt; int32_t TopicArrayCnt; int64_t offset = RD_KAFKA_OFFSET_INVALID; int16_t ApiVersion; @@ -1055,6 +1117,13 @@ rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, if (ApiVersion >= 3) rd_kafka_buf_read_throttle_time(rkbuf); + if (ApiVersion >= 8) { + rd_kafkap_str_t group_id; + // 
Currently we are supporting only 1 group + rd_kafka_buf_read_arraycnt(rkbuf, &GroupArrayCnt, 1); + rd_kafka_buf_read_str(rkbuf, &group_id); + } + if (!*offsets) *offsets = rd_kafka_topic_partition_list_new(16); @@ -1066,12 +1135,17 @@ rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); for (i = 0; i < TopicArrayCnt; i++) { rd_kafkap_str_t topic; + rd_kafka_Uuid_t *topic_id = NULL; int32_t PartArrayCnt; char *topic_name; int j; rd_kafka_buf_read_str(rkbuf, &topic); - + // if(ApiVersion >= 9) { + // topic_id = rd_kafka_Uuid_new(); + // rd_kafka_buf_read_uuid(rkbuf, + // topic_id); + // } rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt, RD_KAFKAP_PARTITIONS_MAX); @@ -1094,10 +1168,18 @@ rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, rktpar = rd_kafka_topic_partition_list_find( *offsets, topic_name, partition); - if (!rktpar && add_part) - rktpar = rd_kafka_topic_partition_list_add( - *offsets, topic_name, partition); - else if (!rktpar) { + if (!rktpar && add_part) { + if (topic_id) { + rktpar = + rd_kafka_topic_partition_list_add_with_topic_id( + *offsets, *topic_id, partition); + } else { + rktpar = + rd_kafka_topic_partition_list_add( + *offsets, topic_name, + partition); + } + } else if (!rktpar) { rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", "OffsetFetchResponse: %s [%" PRId32 "] " @@ -1159,6 +1241,8 @@ rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, /* Loose ref from get_toppar() */ if (rktp) rd_kafka_toppar_destroy(rktp); + + RD_IF_FREE(topic_id, rd_kafka_Uuid_destroy); } rd_kafka_buf_skip_tags(rkbuf); @@ -1284,6 +1368,9 @@ void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, * have usable offsets then no request is sent at all but an empty * reply is enqueued on the replyq. * + * FIXME: Even though the version is upgraded to v9, currently we support + * only a single group. + * * @param group_id Request offset for this group id. * @param parts (optional) List of topic partitions to request, * or NULL to return all topic partitions associated with the @@ -1295,10 +1382,18 @@ void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, const char *group_id, rd_kafka_topic_partition_list_t *parts, + rd_bool_t use_topic_id, + int32_t generation_id_or_member_epoch, + rd_kafkap_str_t *member_id, rd_bool_t require_stable_offsets, int timeout, rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, + void (*resp_cb)(rd_kafka_t *, + rd_kafka_broker_t *, + rd_kafka_resp_err_t, + rd_kafka_buf_t *, + rd_kafka_buf_t *, + void *), void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion; @@ -1306,7 +1401,7 @@ void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, int PartCnt = -1; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_OffsetFetch, 0, 7, NULL); + rkb, RD_KAFKAP_OffsetFetch, 0, 9, NULL); if (parts) { parts_size = parts->cnt * 32; @@ -1314,13 +1409,34 @@ void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, rkbuf = rd_kafka_buf_new_flexver_request( rkb, RD_KAFKAP_OffsetFetch, 1, - /* GroupId + rd_kafka_buf_write_arraycnt_pos + - * Topics + RequireStable */ - 32 + 4 + parts_size + 1, ApiVersion >= 6 /*flexver*/); + /* GroupId + GenerationIdOrMemberEpoch + MemberId + + * rd_kafka_buf_write_arraycnt_pos + Topics + RequireStable */ + 32 + 4 + 50 + 4 + parts_size + 1, ApiVersion >= 6 /*flexver*/); + + if (ApiVersion >= 8) { + /* + * Groups array count. + * Currently, only supporting 1 group. + * TODO: Update to use multiple groups. 
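Putting the new parameters together, a hypothetical v9-style call for a single group, following the signature above ("my-group" and the timeout are illustrative; a NULL member_id is serialized as a null string, and -1 means no member epoch):

    rd_kafka_OffsetFetchRequest(
        rkb, "my-group", parts, rd_false /* use_topic_id */,
        -1 /* generation_id_or_member_epoch */, NULL /* member_id */,
        rd_true /* require_stable_offsets */, 5000 /* timeout (ms) */,
        RD_KAFKA_NO_REPLYQ, rd_kafka_op_handle_OffsetFetch,
        NULL /* opaque */);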
+ */ + rd_kafka_buf_write_arraycnt(rkbuf, 1); + } /* ConsumerGroup */ rd_kafka_buf_write_str(rkbuf, group_id, -1); + if (ApiVersion >= 9) { + if (!member_id) { + rd_kafkap_str_t *null_member_id = + rd_kafkap_str_new(NULL, -1); + rd_kafka_buf_write_kstr(rkbuf, null_member_id); + rd_kafkap_str_destroy(null_member_id); + } else { + rd_kafka_buf_write_kstr(rkbuf, member_id); + } + rd_kafka_buf_write_i32(rkbuf, generation_id_or_member_epoch); + } + if (parts) { /* Sort partitions by topic */ rd_kafka_topic_partition_list_sort_by_topic(parts); @@ -1332,11 +1448,18 @@ void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; PartCnt = rd_kafka_buf_write_topic_partitions( rkbuf, parts, rd_false /*include invalid offsets*/, - rd_false /*skip valid offsets */, fields); + rd_false /*skip valid offsets */, + use_topic_id /* use_topic id */, rd_true /*use topic name*/, + fields); } else { rd_kafka_buf_write_arraycnt(rkbuf, PartCnt); } + if (ApiVersion >= 8) { + // Tags for the groups array + rd_kafka_buf_write_tags(rkbuf); + } + if (ApiVersion >= 7) { /* RequireStable */ rd_kafka_buf_write_i8(rkbuf, require_stable_offsets); @@ -1473,12 +1596,16 @@ rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, rd_kafka_buf_t *request, rd_kafka_topic_partition_list_t *offsets, rd_bool_t ignore_cgrp) { - const int log_decode_errors = LOG_ERR; - int32_t TopicArrayCnt; - int errcnt = 0; - int partcnt = 0; - int i; - int actions = 0; + const int log_decode_errors = LOG_ERR; + int errcnt = 0; + int partcnt = 0; + int actions = 0; + rd_kafka_topic_partition_list_t *partitions = NULL; + rd_kafka_topic_partition_t *partition = NULL; + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; if (err) goto err; @@ -1486,49 +1613,37 @@ rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) rd_kafka_buf_read_throttle_time(rkbuf); - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - for (i = 0; i < TopicArrayCnt; i++) { - rd_kafkap_str_t topic; - char *topic_str; - int32_t PartArrayCnt; - int j; + partitions = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true /*use topic name*/, + 0, fields); - rd_kafka_buf_read_str(rkbuf, &topic); - rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt); - - RD_KAFKAP_STR_DUPA(&topic_str, &topic); - - for (j = 0; j < PartArrayCnt; j++) { - int32_t partition; - int16_t ErrorCode; - rd_kafka_topic_partition_t *rktpar; + if (!partitions) + goto err_parse; - rd_kafka_buf_read_i32(rkbuf, &partition); - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + partcnt = partitions->cnt; + RD_KAFKA_TPLIST_FOREACH(partition, partitions) { + rd_kafka_topic_partition_t *rktpar; - rktpar = rd_kafka_topic_partition_list_find( - offsets, topic_str, partition); - - if (!rktpar) { - /* Received offset for topic/partition we didn't - * ask for, this shouldn't really happen. */ - continue; - } + rktpar = rd_kafka_topic_partition_list_find( + offsets, partition->topic, partition->partition); - rktpar->err = ErrorCode; - if (ErrorCode) { - err = ErrorCode; - errcnt++; - - /* Accumulate actions for per-partition - * errors. */ - actions |= rd_kafka_handle_OffsetCommit_error( - rkb, request, rktpar); - } + if (!rktpar) { + /* Received offset for topic/partition we didn't + * ask for, this shouldn't really happen. 
*/ + continue; + } - partcnt++; + if (partition->err) { + rktpar->err = partition->err; + err = partition->err; + errcnt++; + /* Accumulate actions for per-partition + * errors. */ + actions |= rd_kafka_handle_OffsetCommit_error( + rkb, request, partition); } } + rd_kafka_topic_partition_list_destroy(partitions); /* If all partitions failed use error code * from last partition as the global error. */ @@ -1596,23 +1711,18 @@ int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, void *opaque, const char *reason) { rd_kafka_buf_t *rkbuf; - ssize_t of_TopicCnt = -1; - int TopicCnt = 0; - const char *last_topic = NULL; - ssize_t of_PartCnt = -1; - int PartCnt = 0; - int tot_PartCnt = 0; - int i; + int tot_PartCnt = 0; int16_t ApiVersion; int features; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_OffsetCommit, 0, 7, &features); + rkb, RD_KAFKAP_OffsetCommit, 0, 9, &features); rd_kafka_assert(NULL, offsets != NULL); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit, 1, - 100 + (offsets->cnt * 128)); + rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_OffsetCommit, 1, + 100 + (offsets->cnt * 128), + ApiVersion >= 8); /* ConsumerGroup */ rd_kafka_buf_write_str(rkbuf, cgmetadata->group_id, -1); @@ -1637,61 +1747,23 @@ int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, /* Sort offsets by topic */ rd_kafka_topic_partition_list_sort_by_topic(offsets); - /* TopicArrayCnt: Will be updated when we know the number of topics. */ - of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); - - for (i = 0; i < offsets->cnt; i++) { - rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; - - /* Skip partitions with invalid offset. */ - if (rktpar->offset < 0) - continue; - - if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) { - /* New topic */ - - /* Finalize previous PartitionCnt */ - if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, - PartCnt); - - /* TopicName */ - rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); - /* PartitionCnt, finalized later */ - of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartCnt = 0; - last_topic = rktpar->topic; - TopicCnt++; - } - - /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktpar->partition); - PartCnt++; - tot_PartCnt++; - - /* Offset */ - rd_kafka_buf_write_i64(rkbuf, rktpar->offset); + /* Write partition list, filtering out partitions with valid + * offsets */ + rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + ApiVersion >= 6 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH + : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, + ApiVersion == 1 ? RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP + : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, + RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - /* v6: KIP-101 CommittedLeaderEpoch */ - if (ApiVersion >= 6) - rd_kafka_buf_write_i32( - rkbuf, - rd_kafka_topic_partition_get_leader_epoch(rktpar)); - - /* v1: TimeStamp */ - if (ApiVersion == 1) - rd_kafka_buf_write_i64(rkbuf, -1); - - /* Metadata */ - /* Java client 0.9.0 and broker <0.10.0 can't parse - * Null metadata fields, so as a workaround we send an - * empty string if it's Null. 
*/ - if (!rktpar->metadata) - rd_kafka_buf_write_str(rkbuf, "", 0); - else - rd_kafka_buf_write_str(rkbuf, rktpar->metadata, - rktpar->metadata_size); - } + tot_PartCnt = rd_kafka_buf_write_topic_partitions( + rkbuf, offsets, rd_true /*skip invalid offsets*/, + rd_false /*include valid offsets */, + rd_false /*don't use topic id*/, rd_true /*use topic name*/, + fields); if (tot_PartCnt == 0) { /* No topic+partitions had valid offsets to commit. */ @@ -1700,13 +1772,6 @@ int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, return 0; } - /* Finalize previous PartitionCnt */ - if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt); - - /* Finalize TopicCnt */ - rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt); - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_rkb_dbg(rkb, TOPIC, "OFFSET", @@ -1773,6 +1838,7 @@ rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb, rd_kafka_buf_write_topic_partitions( rkbuf, grpoffsets->partitions, rd_false /*dont skip invalid offsets*/, rd_false /*any offset*/, + rd_false /*don't use topic id*/, rd_true /*use topic name*/, fields); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -1803,6 +1869,7 @@ rd_kafka_group_MemberState_consumer_write(rd_kafka_buf_t *env_rkbuf, rd_kafka_buf_write_topic_partitions( rkbuf, rkgm->rkgm_assignment, rd_false /*don't skip invalid offsets*/, rd_false /* any offset */, + rd_false /*don't use topic id*/, rd_true /*use topic name*/, fields); rd_kafka_buf_write_kbytes(rkbuf, rkgm->rkgm_userdata); @@ -2105,6 +2172,153 @@ void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb, rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); } +void rd_kafka_ConsumerGroupHeartbeatRequest( + rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + int32_t member_epoch, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *rack_id, + int32_t rebalance_timeout_ms, + const rd_kafka_topic_partition_list_t *subscribe_topics, + const rd_kafkap_str_t *remote_assignor, + const rd_kafka_topic_partition_list_t *current_assignments, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int features; + size_t rkbuf_size = 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_ConsumerGroupHeartbeat, 0, 0, &features); + + if (rd_rkb_is_dbg(rkb, CGRP)) { + char current_assignments_str[512] = "NULL"; + char subscribe_topics_str[512] = "NULL"; + const char *member_id_str = "NULL"; + const char *group_instance_id_str = "NULL"; + const char *remote_assignor_str = "NULL"; + + if (current_assignments) { + rd_kafka_topic_partition_list_str( + current_assignments, current_assignments_str, + sizeof(current_assignments_str), 0); + } + if (subscribe_topics) { + rd_kafka_topic_partition_list_str( + subscribe_topics, subscribe_topics_str, + sizeof(subscribe_topics_str), 0); + } + if (member_id) + member_id_str = member_id->str; + if (group_instance_id) + group_instance_id_str = group_instance_id->str; + if (remote_assignor) + remote_assignor_str = remote_assignor->str; + + rd_rkb_dbg(rkb, CGRP, "HEARTBEAT", + "ConsumerGroupHeartbeat of member id \"%s\", group " + "id \"%s\", " + "generation id %" PRId32 + ", group instance id \"%s\"" + ", current assignment \"%s\"" + ", subscribe topics \"%s\"" + ", remote assignor \"%s\"", + member_id_str, group_id->str, member_epoch, + group_instance_id_str, current_assignments_str, + subscribe_topics_str, remote_assignor_str); + } + + 
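+ /* NOTE (editor sketch, not upstream code): the size computations
+ * below only pre-size the flexver buffer; such buffers grow on
+ * demand, so an under-estimate costs a reallocation rather than a
+ * malformed request. The (4 + 50) per subscribed topic appears to
+ * budget an array header plus a conservative topic name length. */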
size_t next_subscription_size = 0; + + if (subscribe_topics) { + next_subscription_size = + ((subscribe_topics->cnt * (4 + 50)) + 4); + } + + if (group_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(group_id); + if (member_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(member_id); + rkbuf_size += 4; /* MemberEpoch */ + if (group_instance_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(group_instance_id); + if (rack_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(rack_id); + rkbuf_size += 4; /* RebalanceTimeoutMs */ + if (next_subscription_size) + rkbuf_size += next_subscription_size; + if (remote_assignor) + rkbuf_size += RD_KAFKAP_STR_SIZE(remote_assignor); + if (current_assignments) + rkbuf_size += (current_assignments->cnt * (16 + 100)); + rkbuf_size += 4; /* TopicPartitions */ + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_ConsumerGroupHeartbeat, 1, rkbuf_size, rd_true); + + rd_kafka_buf_write_kstr(rkbuf, group_id); + rd_kafka_buf_write_kstr(rkbuf, member_id); + rd_kafka_buf_write_i32(rkbuf, member_epoch); + rd_kafka_buf_write_kstr(rkbuf, group_instance_id); + rd_kafka_buf_write_kstr(rkbuf, rack_id); + rd_kafka_buf_write_i32(rkbuf, rebalance_timeout_ms); + + if (subscribe_topics) { + size_t of_TopicsArrayCnt; + int topics_cnt = subscribe_topics->cnt; + + /* write Topics */ + of_TopicsArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicsArrayCnt, + topics_cnt); + while (--topics_cnt >= 0) + rd_kafka_buf_write_str( + rkbuf, subscribe_topics->elems[topics_cnt].topic, + -1); + + } else { + rd_kafka_buf_write_arraycnt(rkbuf, -1); + } + + rd_kafka_buf_write_kstr(rkbuf, remote_assignor); + + if (current_assignments) { + const rd_kafka_topic_partition_field_t + current_assignments_fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, current_assignments, rd_false, rd_false, + rd_true /*use topic id*/, rd_false /*don't use topic name*/, + current_assignments_fields); + } else { + rd_kafka_buf_write_arraycnt(rkbuf, -1); + } + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + /* FIXME: + * 1) Improve this timeout to something less than + * `rkcg_heartbeat_intvl_ms` so that the next heartbeat + * is not skipped. + * 2) Remove usage of `group_session_timeout_ms` altogether + * from the new protocol defined in KIP-848. + */ + if (rkb->rkb_rk->rk_cgrp->rkcg_heartbeat_intvl_ms > 0) { + rd_kafka_buf_set_abs_timeout( + rkbuf, rkb->rkb_rk->rk_cgrp->rkcg_heartbeat_intvl_ms, 0); + } else { + rd_kafka_buf_set_abs_timeout( + rkbuf, rkb->rkb_rk->rk_conf.group_session_timeout_ms, 0); + } + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); +} + /** @@ -2330,30 +2544,65 @@ static void rd_kafka_handle_Metadata(rd_kafka_t *rk, rd_kafka_op_destroy(rko); } - /** - * @brief Internal implementation of MetadataRequest (does not send). + * @brief Internal implementation of MetadataRequest. + * + * - !topics && !topic_ids: only request brokers (if supported by + * broker, else all topics) + * - topics.cnt > 0 && topic_ids.cnt > 0: invalid request + * - topics.cnt > 0 || topic_ids.cnt > 0: only specified topics + * are requested + * - else: all topics in cluster are requested * - * @param force - rd_true: force a full request (including all topics and - * brokers) even if there is such a request already - * in flight. - * - rd_false: check if there are multiple outstanding full - * requests, and don't send one if there is already - * one present. (See note below.) 
+ * @param topics A list of topic names (char *) to request. + * @param topic_ids A list of topic ids (rd_kafka_Uuid_t *) to request. + * @param reason Metadata request reason + * @param allow_auto_create_topics Allow broker-side auto topic creation. + * This is best-effort, depending on broker + * config and version. + * @param include_cluster_authorized_operations Request for cluster + * authorized operations. + * @param include_topic_authorized_operations Request for topic + * authorized operations. + * @param cgrp_update Update cgrp in parse_Metadata (see comment there). + * @param force_racks Force partition to rack mapping computation in + * parse_Metadata (see comment there). + * @param rko (optional) rko with replyq for handling response. + * Specifying an rko forces a metadata request even if + * there is already a matching one in-transit. + * @param resp_cb Callback to be used for handling response. + * @param replyq replyq on which response is handled. + * @param force rd_true: force a full request (including all topics and + * brokers) even if there is such a request already + * in flight. + * rd_false: check if there are multiple outstanding full + * requests, and don't send one if there is already + * one present. (See note below.) + * @param opaque (optional) parameter to be passed to resp_cb. * - * If full metadata for all topics is requested (or - * all brokers, which results in all-topics on older brokers) and there is - * already a full request in transit then this function will return - * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS otherwise RD_KAFKA_RESP_ERR_NO_ERROR. - * If \p rko is non-NULL or if \p force is true, the request is sent regardless. + * @return Error code: + * If full metadata for all topics is requested (or + * all brokers, which results in all-topics on older brokers) and + * there is already a full request in transit then this function + * will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. * - * \p include_cluster_authorized_operations should not be set unless this - * MetadataRequest is for an admin operation. \sa - * rd_kafka_MetadataRequest_admin(). + * @remark Either \p topics or \p topic_ids must be set, but not both. + * @remark If \p rko is specified, \p resp_cb, \p replyq, \p force, \p opaque + * should be NULL or rd_false. + * @remark If \p rko is non-NULL or if \p force is true, + * the request is sent regardless. + * @remark \p include_cluster_authorized_operations and + * \p include_topic_authorized_operations should not be set unless this + * MetadataRequest is for an admin operation. + * + * @sa rd_kafka_MetadataRequest(). + * @sa rd_kafka_MetadataRequest_resp_cb(). */ static rd_kafka_resp_err_t rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, const rd_list_t *topics, + const rd_list_t *topic_ids, const char *reason, rd_bool_t allow_auto_create_topics, rd_bool_t include_cluster_authorized_operations, @@ -2369,6 +2618,8 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, int16_t ApiVersion = 0; size_t of_TopicArrayCnt; int features; + int topic_id_cnt; + int total_topic_cnt; int topic_cnt = topics ? rd_list_cnt(topics) : 0; int *full_incr = NULL; void *handler_arg = NULL; @@ -2385,9 +2636,15 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, ApiVersion = rd_kafka_broker_ApiVersion_supported( rkb, RD_KAFKAP_Metadata, 0, metadata_max_version, &features); + topic_id_cnt = + (ApiVersion >= 10 && topic_ids) ? 
rd_list_cnt(topic_ids) : 0; + rd_assert(topic_id_cnt == 0 || ApiVersion >= 12); + + total_topic_cnt = topic_cnt + topic_id_cnt; + rkbuf = rd_kafka_buf_new_flexver_request( rkb, RD_KAFKAP_Metadata, 1, - 4 + (66 /* 50 for topic name and 16 for topic id */ * topic_cnt) + + 4 + ((50 /*topic name */ + 16 /* topic id */) * total_topic_cnt) + 1, ApiVersion >= 9); @@ -2401,7 +2658,7 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, /* TopicArrayCnt */ of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); - if (!topics) { + if (!topics && !topic_ids) { /* v0: keep 0, brokers only not available, * request all topics */ /* v1-8: 0 means empty array, brokers only */ @@ -2416,7 +2673,7 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, full_incr = &rkb->rkb_rk->rk_metadata_cache.rkmc_full_brokers_sent; - } else if (topic_cnt == 0) { + } else if (total_topic_cnt == 0) { /* v0: keep 0, request all topics */ if (ApiVersion >= 1 && ApiVersion < 9) { /* v1-8: update to -1, all topics */ @@ -2435,14 +2692,17 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, .rkmc_full_topics_sent; } else { + /* Cannot request topics by name and id at the same time */ + rd_dassert(!(topic_cnt > 0 && topic_id_cnt > 0)); + /* request cnt topics */ rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, - topic_cnt); + total_topic_cnt); rd_rkb_dbg(rkb, METADATA, "METADATA", "Request metadata for %d topic(s): " "%s", - topic_cnt, reason); + total_topic_cnt, reason); } if (full_incr) { @@ -2484,9 +2744,6 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, RD_LIST_FOREACH(topic, topics, i) { if (ApiVersion >= 10) { - /* FIXME: Not supporting topic id in the request - * right now. Update this to correct topic - * id once KIP-516 is fully implemented. */ rd_kafka_buf_write_uuid(rkbuf, &zero_uuid); } rd_kafka_buf_write_str(rkbuf, topic, -1); @@ -2495,6 +2752,23 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, } } + if (ApiVersion >= 10 && topic_id_cnt > 0) { + int i; + rd_kafka_Uuid_t *topic_id; + + /* Maintain a copy of the topics list so we can purge + * hints from the metadata cache on error. */ + rkbuf->rkbuf_u.Metadata.topic_ids = + rd_list_copy(topic_ids, rd_list_Uuid_copy, NULL); + + RD_LIST_FOREACH(topic_id, topic_ids, i) { + rd_kafka_buf_write_uuid(rkbuf, topic_id); + rd_kafka_buf_write_str(rkbuf, NULL, -1); + /* Tags for previous topic */ + rd_kafka_buf_write_tags(rkbuf); + } + } + if (ApiVersion >= 4) { /* AllowAutoTopicCreation */ rd_kafka_buf_write_bool(rkbuf, allow_auto_create_topics); @@ -2563,47 +2837,18 @@ rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR_NO_ERROR; } - -/** - * @brief Construct a MetadataRequest which uses an optional rko, and the - * default handler callback. - * @sa rd_kafka_MetadataRequest. - */ -static rd_kafka_resp_err_t -rd_kafka_MetadataRequest_op(rd_kafka_broker_t *rkb, - const rd_list_t *topics, - const char *reason, - rd_bool_t allow_auto_create_topics, - rd_bool_t include_cluster_authorized_operations, - rd_bool_t include_topic_authorized_operations, - rd_bool_t cgrp_update, - rd_bool_t force_racks, - rd_kafka_op_t *rko) { - return rd_kafka_MetadataRequest0( - rkb, topics, reason, allow_auto_create_topics, - include_cluster_authorized_operations, - include_topic_authorized_operations, cgrp_update, force_racks, rko, - /* We use the default rd_kafka_handle_Metadata rather than a custom - resp_cb */ - NULL, - /* Use default replyq which works with the default handler - rd_kafka_handle_Metadata. 
*/
-           RD_KAFKA_NO_REPLYQ,
-           /* If the request needs to be forced, rko_u.metadata.force will be
-               set. We don't provide an explicit parameter force. */
-            rd_false, NULL);
-}
-
 /**
- * @brief Construct MetadataRequest (does not send)
- *
- * \p topics is a list of topic names (char *) to request.
+ * @brief Construct and enqueue a MetadataRequest
  *
- * !topics          - only request brokers (if supported by broker, else
- *                    all topics)
- * topics.cnt==0    - all topics in cluster are requested
- * topics.cnt >0    - only specified topics are requested
+ * - !topics && !topic_ids: only request brokers (if supported by
+ *   broker, else all topics)
+ * - topics.cnt > 0 && topic_ids.cnt > 0: invalid request
+ * - topics.cnt > 0 || topic_ids.cnt > 0: only specified topics
+ *   are requested
+ * - else: all topics in cluster are requested
  *
+ * @param topics A list of topic names (char *) to request.
+ * @param topic_ids A list of topic ids (rd_kafka_Uuid_t *) to request.
  * @param reason - metadata request reason
  * @param allow_auto_create_topics - allow broker-side auto topic creation.
  *                                   This is best-effort, depending on broker
  *                                   config and version.
@@ -2615,54 +2860,82 @@ rd_kafka_MetadataRequest_op(rd_kafka_broker_t *rkb,
  *        Specifying an rko forces a metadata request even if
  *        there is already a matching one in-transit.
  *
- * If full metadata for all topics is requested (or
- * all brokers, which results in all-topics on older brokers) and there is
- * already a full request in transit then this function will return
- * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS otherwise RD_KAFKA_RESP_ERR_NO_ERROR.
- * If \p rko is non-NULL, the request is sent regardless.
+ * @return Error code:
+ *         If full metadata for all topics is requested (or
+ *         all brokers, which results in all-topics on older brokers) and
+ *         there is already a full request in transit then this function
+ *         will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
+ *         otherwise RD_KAFKA_RESP_ERR_NO_ERROR.
+ *         If \p rko is non-NULL, the request is sent regardless.
+ *
+ * @remark Either \p topics or \p topic_ids must be set, but not both.
  */
 rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb,
                                              const rd_list_t *topics,
+                                             rd_list_t *topic_ids,
                                              const char *reason,
                                              rd_bool_t allow_auto_create_topics,
                                              rd_bool_t cgrp_update,
                                              rd_bool_t force_racks,
                                              rd_kafka_op_t *rko) {
-        return rd_kafka_MetadataRequest_op(
-            rkb, topics, reason, allow_auto_create_topics,
-            /* Cluster and Topic authorized operations are used by admin
-             * operations only. For non-admin operation cases, NEVER set them to
-             * true, since it changes the metadata max version to be 10, until
-             * KIP-700 can be implemented. */
-            rd_false, rd_false, cgrp_update, force_racks, rko);
+        return rd_kafka_MetadataRequest0(
+            rkb, topics, topic_ids, reason, allow_auto_create_topics,
+            rd_false /*don't include cluster authorized operations*/,
+            rd_false /*don't include topic authorized operations*/, cgrp_update,
+            force_racks, rko,
+            /* We use the default rd_kafka_handle_Metadata rather than a custom
+               resp_cb */
+            NULL,
+            /* Use default replyq which works with the default handler
+               rd_kafka_handle_Metadata. */
+            RD_KAFKA_NO_REPLYQ,
+            /* If the request needs to be forced, rko_u.metadata.force will be
+               set. We don't provide an explicit parameter force. */
+            rd_false, NULL);
 }
 
-
 /**
- * @brief Construct MetadataRequest for use with AdminAPI (does not send).
+ * @brief Construct and enqueue a MetadataRequest which uses
+ *        response callback \p resp_cb instead of an rko.
- *
- * \p topics is a list of topic names (char *) to request.
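/*
 * Illustrative sketch (editor's addition, not from the upstream patch):
 * requesting metadata for two topics by name through the updated
 * rd_kafka_MetadataRequest() entry point above. Either topic names or topic
 * ids may be supplied, never both; a by-id request would pass NULL topics and
 * a list of rd_kafka_Uuid_t * instead. The topic names and the wrapper
 * function are hypothetical.
 */
#if 0 /* illustrative only */
static rd_kafka_resp_err_t example_metadata_by_name(rd_kafka_broker_t *rkb) {
        rd_kafka_resp_err_t err;
        rd_list_t topics;

        rd_list_init(&topics, 2, rd_free);
        rd_list_add(&topics, rd_strdup("topic-a"));
        rd_list_add(&topics, rd_strdup("topic-b"));

        err = rd_kafka_MetadataRequest(
            rkb, &topics, NULL /* no topic ids */, "example refresh",
            rd_false /* no auto-create */, rd_false /* no cgrp update */,
            rd_false /* no forced rack computation */, NULL /* no rko */);

        rd_list_destroy(&topics);
        return err;
}
#endif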
+ * - !topics && !topic_ids: only request brokers (if supported by + * broker, else all topics) + * - topics.cnt > 0 && topic_ids.cnt > 0: invalid request + * - topics.cnt > 0 || topic_ids.cnt > 0: only specified topics + * are requested + * - else: all topics in cluster are requested * - * !topics - only request brokers (if supported by broker, else - * all topics) - * topics.cnt==0 - all topics in cluster are requested - * topics.cnt >0 - only specified topics are requested + * @param topics A list of topic names (char *) to request. + * @param topic_ids A list of topic ids (rd_kafka_Uuid_t *) to request. + * @param reason Metadata request reason + * @param allow_auto_create_topics Allow broker-side auto topic creation. + * This is best-effort, depending on broker + * config and version. + * @param include_cluster_authorized_operations Request for cluster + * authorized operations. + * @param include_topic_authorized_operations Request for topic + * authorized operations. + * @param cgrp_update Update cgrp in parse_Metadata (see comment there). + * @param force_racks Force partition to rack mapping computation in + * parse_Metadata (see comment there). + * @param resp_cb Callback to be used for handling response. + * @param replyq replyq on which response is handled. + * @param force Force request even if in progress. + * @param opaque (optional) parameter to be passed to resp_cb. * - * @param reason - metadata request reason - * @param include_cluster_authorized_operations - request for cluster - * authorized operations. - * @param include_topic_authorized_operations - request for topic authorized - * operations. - * @param cgrp_update - Update cgrp in parse_Metadata (see comment there). - * @param force_racks - Force partition to rack mapping computation in - * parse_Metadata (see comment there). - * @param resp_cb - callback to be used for handling response. - * @param replyq - replyq on which response is handled. - * @param opaque - (optional) parameter to be passed to resp_cb. + * @return Error code: + * If full metadata for all topics is requested (or + * all brokers, which results in all-topics on older brokers) and + * there is already a full request in transit then this function + * will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. + * + * @remark Either \p topics or \p topic_ids must be set, but not both. */ rd_kafka_resp_err_t rd_kafka_MetadataRequest_resp_cb( rd_kafka_broker_t *rkb, const rd_list_t *topics, + const rd_list_t *topics_ids, const char *reason, rd_bool_t allow_auto_create_topics, rd_bool_t include_cluster_authorized_operations, @@ -2674,11 +2947,10 @@ rd_kafka_resp_err_t rd_kafka_MetadataRequest_resp_cb( rd_bool_t force, void *opaque) { return rd_kafka_MetadataRequest0( - rkb, topics, reason, allow_auto_create_topics, + rkb, topics, topics_ids, reason, allow_auto_create_topics, include_cluster_authorized_operations, include_topic_authorized_operations, cgrp_update, force_racks, - NULL /* No op - using custom resp_cb. */, resp_cb, replyq, - rd_true /* Admin operation metadata requests are always forced. */, + NULL /* No op - using custom resp_cb. 
*/, resp_cb, replyq, force, opaque); } @@ -3001,16 +3273,6 @@ void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb, rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); } - - -/** - * @struct Hold temporary result and return values from ProduceResponse - */ -struct rd_kafka_Produce_result { - int64_t offset; /**< Assigned offset of first message */ - int64_t timestamp; /**< (Possibly assigned) offset of first message */ -}; - /** * @brief Parses a Produce reply. * @returns 0 on success or an error code on failure. @@ -3021,7 +3283,7 @@ rd_kafka_handle_Produce_parse(rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request, - struct rd_kafka_Produce_result *result) { + rd_kafka_Produce_result_t *result) { int32_t TopicArrayCnt; int32_t PartitionArrayCnt; struct { @@ -3059,6 +3321,36 @@ rd_kafka_handle_Produce_parse(rd_kafka_broker_t *rkb, if (request->rkbuf_reqhdr.ApiVersion >= 5) rd_kafka_buf_read_i64(rkbuf, &log_start_offset); + if (request->rkbuf_reqhdr.ApiVersion >= 8) { + int i; + int32_t RecordErrorsCnt; + rd_kafkap_str_t ErrorMessage; + rd_kafka_buf_read_i32(rkbuf, &RecordErrorsCnt); + if (RecordErrorsCnt) { + result->record_errors = rd_calloc( + RecordErrorsCnt, sizeof(*result->record_errors)); + result->record_errors_cnt = RecordErrorsCnt; + for (i = 0; i < RecordErrorsCnt; i++) { + int32_t BatchIndex; + rd_kafkap_str_t BatchIndexErrorMessage; + rd_kafka_buf_read_i32(rkbuf, &BatchIndex); + rd_kafka_buf_read_str(rkbuf, + &BatchIndexErrorMessage); + result->record_errors[i].batch_index = + BatchIndex; + if (!RD_KAFKAP_STR_IS_NULL( + &BatchIndexErrorMessage)) + result->record_errors[i].errstr = + RD_KAFKAP_STR_DUP( + &BatchIndexErrorMessage); + } + } + + rd_kafka_buf_read_str(rkbuf, &ErrorMessage); + if (!RD_KAFKAP_STR_IS_NULL(&ErrorMessage)) + result->errstr = RD_KAFKAP_STR_DUP(&ErrorMessage); + } + if (request->rkbuf_reqhdr.ApiVersion >= 1) { int32_t Throttle_Time; rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); @@ -3886,6 +4178,59 @@ rd_kafka_handle_idempotent_Produce_success(rd_kafka_broker_t *rkb, rk, RD_KAFKA_RESP_ERR__INCONSISTENT, "%s", fatal_err); } +/** + * @brief Set \p batch error codes, corresponding to the indices that caused + * the error in 'presult->record_errors', to INVALID_RECORD and + * the rest to _INVALID_DIFFERENT_RECORD. + * + * @param presult Produce result structure + * @param batch Batch of messages + * + * @locks none + * @locality broker thread (but not necessarily the leader broker thread) + */ +static void rd_kafka_msgbatch_handle_Produce_result_record_errors( + const rd_kafka_Produce_result_t *presult, + rd_kafka_msgbatch_t *batch) { + rd_kafka_msg_t *rkm = TAILQ_FIRST(&batch->msgq.rkmq_msgs); + if (presult->record_errors) { + int i = 0, j = 0; + while (rkm) { + if (j < presult->record_errors_cnt && + presult->record_errors[j].batch_index == i) { + rkm->rkm_u.producer.errstr = + presult->record_errors[j].errstr; + /* If the batch contained only a single record + * error, then we can unambiguously use the + * error corresponding to the partition-level + * error code. */ + if (presult->record_errors_cnt > 1) + rkm->rkm_err = + RD_KAFKA_RESP_ERR_INVALID_RECORD; + j++; + } else { + /* If the response contains record errors, then + * the records which failed validation will be + * present in the response. To avoid confusion + * for the remaining records, we return a + * generic error code. 
*/
+                               rkm->rkm_u.producer.errstr =
+                                   "Failed to append record because it was "
+                                   "part of a batch "
+                                   "which had one or more invalid records";
+                               rkm->rkm_err =
+                                   RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD;
+                       }
+                       rkm = TAILQ_NEXT(rkm, rkm_link);
+                       i++;
+               }
+       } else if (presult->errstr) {
+               while (rkm) {
+                       rkm->rkm_u.producer.errstr = presult->errstr;
+                       rkm = TAILQ_NEXT(rkm, rkm_link);
+               }
+       }
+}
 
 /**
  * @brief Handle ProduceRequest result for a message batch.
@@ -3899,7 +4244,7 @@ static void rd_kafka_msgbatch_handle_Produce_result(
     rd_kafka_broker_t *rkb,
     rd_kafka_msgbatch_t *batch,
     rd_kafka_resp_err_t err,
-    const struct rd_kafka_Produce_result *presult,
+    const rd_kafka_Produce_result_t *presult,
     const rd_kafka_buf_t *request) {
 
        rd_kafka_t *rk = rkb->rkb_rk;
@@ -3968,8 +4313,11 @@ static void rd_kafka_msgbatch_handle_Produce_result(
                            presult->offset, presult->timestamp, status);
 
+               /* Change error codes if necessary */
+               rd_kafka_msgbatch_handle_Produce_result_record_errors(presult,
+                                                                     batch);
                /* Enqueue messages for delivery report. */
-               rd_kafka_dr_msgq(rktp->rktp_rkt, &batch->msgq, err);
+               rd_kafka_dr_msgq0(rktp->rktp_rkt, &batch->msgq, err, presult);
        }
 
        if (rd_kafka_is_idempotent(rk) && last_inflight)
@@ -3997,10 +4345,10 @@ static void rd_kafka_handle_Produce(rd_kafka_t *rk,
                                    rd_kafka_buf_t *reply,
                                    rd_kafka_buf_t *request,
                                    void *opaque) {
-       rd_kafka_msgbatch_t *batch = &request->rkbuf_batch;
-       rd_kafka_toppar_t *rktp    = batch->rktp;
-       struct rd_kafka_Produce_result result = {
-           .offset = RD_KAFKA_OFFSET_INVALID, .timestamp = -1};
+       rd_kafka_msgbatch_t *batch        = &request->rkbuf_batch;
+       rd_kafka_toppar_t *rktp           = batch->rktp;
+       rd_kafka_Produce_result_t *result =
+           rd_kafka_Produce_result_new(RD_KAFKA_OFFSET_INVALID, -1);
 
        /* Unit test interface: inject errors */
        if (unlikely(rk->rk_conf.ut.handle_ProduceResponse != NULL)) {
@@ -4011,10 +4359,11 @@ static void rd_kafka_handle_Produce(rd_kafka_t *rk,
        /* Parse Produce reply (unless the request errored) */
        if (!err && reply)
                err = rd_kafka_handle_Produce_parse(rkb, rktp, reply, request,
-                                                   &result);
+                                                   result);
 
-       rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, &result,
+       rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, result,
                                                request);
+       rd_kafka_Produce_result_destroy(result);
 }
 
@@ -4369,7 +4718,8 @@ rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb,
            RD_KAFKA_TOPIC_PARTITION_FIELD_END};
        rd_kafka_buf_write_topic_partitions(
            rkbuf, partitions, rd_false /*don't skip invalid offsets*/,
-           rd_false /*any offset*/, fields);
+           rd_false /*any offset*/, rd_false /*don't use topic id*/,
+           rd_true /*use topic name*/, fields);
 
        /* timeout */
        op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
@@ -5558,9 +5908,9 @@ static int unittest_idempotent_producer(void) {
        int remaining_batches;
        uint64_t msgid = 1;
        rd_kafka_toppar_t *rktp;
-       rd_kafka_pid_t pid = {.id = 1000, .epoch = 0};
-       struct rd_kafka_Produce_result result = {.offset    = 1,
-                                                .timestamp = 1000};
+       rd_kafka_pid_t pid                = {.id = 1000, .epoch = 0};
+       rd_kafka_Produce_result_t *result =
+           rd_kafka_Produce_result_new(1, 1000);
        rd_kafka_queue_t *rkqu;
        rd_kafka_event_t *rkev;
        rd_kafka_buf_t *request[_BATCH_CNT];
@@ -5641,8 +5991,8 @@ static int unittest_idempotent_producer(void) {
        RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
        rd_kafka_msgbatch_handle_Produce_result(rkb, &request[i]->rkbuf_batch,
                                                RD_KAFKA_RESP_ERR_NO_ERROR,
-                                               &result, request[i]);
-       result.offset += r;
+                                               result, request[i]);
+       result->offset += r;
        RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == 0,
                     "batch %d: 
expected no messages in rktp_msgq, not %d", i, rd_kafka_msgq_len(&rktp->rktp_msgq)); @@ -5655,7 +6005,7 @@ static int unittest_idempotent_producer(void) { RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); rd_kafka_msgbatch_handle_Produce_result( rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, &result, request[i]); + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_msgq, not %d", i, @@ -5668,8 +6018,7 @@ static int unittest_idempotent_producer(void) { RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); rd_kafka_msgbatch_handle_Produce_result( rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result, - request[i]); + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", @@ -5681,8 +6030,7 @@ static int unittest_idempotent_producer(void) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); rd_kafka_msgbatch_handle_Produce_result( rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result, - request[i]); + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", @@ -5722,8 +6070,8 @@ static int unittest_idempotent_producer(void) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); rd_kafka_msgbatch_handle_Produce_result( rkb, &request[i]->rkbuf_batch, RD_KAFKA_RESP_ERR_NO_ERROR, - &result, request[i]); - result.offset += r; + result, request[i]); + result->offset += r; rd_kafka_buf_destroy(request[i]); } @@ -5761,6 +6109,7 @@ static int unittest_idempotent_producer(void) { /* Verify the expected number of good delivery reports were seen */ RD_UT_ASSERT(drcnt == msgcnt, "expected %d DRs, not %d", msgcnt, drcnt); + rd_kafka_Produce_result_destroy(result); rd_kafka_queue_destroy(rkqu); rd_kafka_toppar_destroy(rktp); rd_kafka_broker_destroy(rkb); diff --git a/lib/librdkafka-2.3.0/src/rdkafka_request.h b/lib/librdkafka-2.4.0/src/rdkafka_request.h similarity index 94% rename from lib/librdkafka-2.3.0/src/rdkafka_request.h rename to lib/librdkafka-2.4.0/src/rdkafka_request.h index ec94b0a5a06..98527596824 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_request.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_request.h @@ -75,6 +75,8 @@ typedef enum { RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH, /** Read/write int16_t for error code */ RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + /** Read/write timestamp */ + RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP, /** Read/write str for metadata */ RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA, /** Noop, useful for ternary ifs */ @@ -83,6 +85,8 @@ typedef enum { rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( rd_kafka_buf_t *rkbuf, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, size_t estimated_part_cnt, const rd_kafka_topic_partition_field_t *fields); @@ -91,6 +95,8 @@ int rd_kafka_buf_write_topic_partitions( const rd_kafka_topic_partition_list_t *parts, rd_bool_t skip_invalid_offsets, rd_bool_t only_invalid_offsets, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, const rd_kafka_topic_partition_field_t *fields); rd_kafka_resp_err_t @@ -169,10 +175,18 @@ void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, void 
rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, const char *group_id, rd_kafka_topic_partition_list_t *parts, + rd_bool_t use_topic_id, + int32_t generation_id_or_member_epoch, + rd_kafkap_str_t *member_id, rd_bool_t require_stable_offsets, int timeout, rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, + void (*resp_cb)(rd_kafka_t *, + rd_kafka_broker_t *, + rd_kafka_resp_err_t, + rd_kafka_buf_t *, + rd_kafka_buf_t *, + void *), void *opaque); rd_kafka_resp_err_t @@ -273,8 +287,24 @@ void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb, rd_kafka_resp_cb_t *resp_cb, void *opaque); +void rd_kafka_ConsumerGroupHeartbeatRequest( + rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + int32_t member_epoch, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *rack_id, + int32_t rebalance_timeout_ms, + const rd_kafka_topic_partition_list_t *subscribe_topics, + const rd_kafkap_str_t *remote_assignor, + const rd_kafka_topic_partition_list_t *current_assignments, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb, const rd_list_t *topics, + rd_list_t *topic_ids, const char *reason, rd_bool_t allow_auto_create_topics, rd_bool_t cgrp_update, @@ -284,6 +314,7 @@ rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb, rd_kafka_resp_err_t rd_kafka_MetadataRequest_resp_cb( rd_kafka_broker_t *rkb, const rd_list_t *topics, + const rd_list_t *topic_ids, const char *reason, rd_bool_t allow_auto_create_topics, rd_bool_t include_cluster_authorized_operations, diff --git a/lib/librdkafka-2.3.0/src/rdkafka_roundrobin_assignor.c b/lib/librdkafka-2.4.0/src/rdkafka_roundrobin_assignor.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_roundrobin_assignor.c rename to lib/librdkafka-2.4.0/src/rdkafka_roundrobin_assignor.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl.h b/lib/librdkafka-2.4.0/src/rdkafka_sasl.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl.h rename to lib/librdkafka-2.4.0/src/rdkafka_sasl.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_cyrus.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl_cyrus.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_cyrus.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_cyrus.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_int.h b/lib/librdkafka-2.4.0/src/rdkafka_sasl_int.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_int.h rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_int.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer.h b/lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer.h rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer_oidc.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer_oidc.c similarity index 100% rename from 
lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer_oidc.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer_oidc.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer_oidc.h b/lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer_oidc.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_oauthbearer_oidc.h rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_oauthbearer_oidc.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_plain.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl_plain.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_plain.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_plain.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_scram.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl_scram.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_scram.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_scram.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sasl_win32.c b/lib/librdkafka-2.4.0/src/rdkafka_sasl_win32.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_sasl_win32.c rename to lib/librdkafka-2.4.0/src/rdkafka_sasl_win32.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_ssl.c b/lib/librdkafka-2.4.0/src/rdkafka_ssl.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_ssl.c rename to lib/librdkafka-2.4.0/src/rdkafka_ssl.c index 85f745cb9ca..0dd7e509dad 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_ssl.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_ssl.c @@ -476,7 +476,8 @@ static int rd_kafka_transport_ssl_set_endpoint_id(rd_kafka_transport_t *rktrans, param = SSL_get0_param(rktrans->rktrans_ssl); - if (!X509_VERIFY_PARAM_set1_host(param, name, 0)) + if (!X509_VERIFY_PARAM_set1_host(param, name, + strnlen(name, sizeof(name)))) goto fail; } #else diff --git a/lib/librdkafka-2.3.0/src/rdkafka_ssl.h b/lib/librdkafka-2.4.0/src/rdkafka_ssl.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_ssl.h rename to lib/librdkafka-2.4.0/src/rdkafka_ssl.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_sticky_assignor.c b/lib/librdkafka-2.4.0/src/rdkafka_sticky_assignor.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_sticky_assignor.c rename to lib/librdkafka-2.4.0/src/rdkafka_sticky_assignor.c index 462da614781..5b7658712c8 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_sticky_assignor.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_sticky_assignor.c @@ -2125,9 +2125,10 @@ static rd_kafkap_bytes_t *rd_kafka_sticky_assignor_get_metadata( const rd_kafka_topic_partition_field_t fields[] = { RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - rd_kafka_buf_write_topic_partitions(rkbuf, state->prev_assignment, - rd_false /*skip invalid offsets*/, - rd_false /*any offset*/, fields); + rd_kafka_buf_write_topic_partitions( + rkbuf, state->prev_assignment, rd_false /*skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); rd_kafka_buf_write_i32(rkbuf, state->generation_id); /* Get binary buffer and allocate a new Kafka Bytes with a copy. 
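/*
 * Editorial sketch (not part of the upstream patch): this upgrade threads two
 * new flags, use_topic_id and use_topic_name, through every
 * rd_kafka_buf_write_topic_partitions()/rd_kafka_buf_read_topic_partitions()
 * call site (DeleteRecords, the sticky assignor above, TxnOffsetCommit below),
 * so partition lists can be serialized with KIP-516 topic ids instead of
 * names. A by-id serialization, mirroring the heartbeat's
 * current_assignments case earlier in this patch, would look like this
 * (rkbuf and parts are hypothetical here):
 */
#if 0 /* illustrative only */
        const rd_kafka_topic_partition_field_t fields[] = {
            RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
            RD_KAFKA_TOPIC_PARTITION_FIELD_END};
        rd_kafka_buf_write_topic_partitions(
            rkbuf, parts, rd_false /*don't skip invalid offsets*/,
            rd_false /*any offset*/, rd_true /*use topic id*/,
            rd_false /*don't use topic name*/, fields);
#endif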
*/ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_subscription.c b/lib/librdkafka-2.4.0/src/rdkafka_subscription.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_subscription.c rename to lib/librdkafka-2.4.0/src/rdkafka_subscription.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_timer.c b/lib/librdkafka-2.4.0/src/rdkafka_timer.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_timer.c rename to lib/librdkafka-2.4.0/src/rdkafka_timer.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_timer.h b/lib/librdkafka-2.4.0/src/rdkafka_timer.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_timer.h rename to lib/librdkafka-2.4.0/src/rdkafka_timer.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_topic.c b/lib/librdkafka-2.4.0/src/rdkafka_topic.c similarity index 94% rename from lib/librdkafka-2.3.0/src/rdkafka_topic.c rename to lib/librdkafka-2.4.0/src/rdkafka_topic.c index 5a161db9ac1..bd1239d5019 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_topic.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_topic.c @@ -189,6 +189,22 @@ rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func, return rkt; } +/** + * Same semantics as ..find() but takes a Uuid instead. + */ +rd_kafka_topic_t *rd_kafka_topic_find_by_topic_id(rd_kafka_t *rk, + rd_kafka_Uuid_t topic_id) { + rd_kafka_topic_t *rkt; + + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + if (!rd_kafka_Uuid_cmp(rkt->rkt_topic_id, topic_id)) { + rd_kafka_topic_keep(rkt); + break; + } + } + + return rkt; +} /** * @brief rd_kafka_topic_t comparator. @@ -646,8 +662,8 @@ static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt, rd_kafka_broker_t *leader, int32_t leader_epoch) { rd_kafka_toppar_t *rktp; - rd_bool_t fetching_from_follower, need_epoch_validation = rd_false; - int r = 0; + rd_bool_t need_epoch_validation = rd_false; + int r = 0; rktp = rd_kafka_toppar_get(rkt, partition, 0); if (unlikely(!rktp)) { @@ -675,14 +691,17 @@ static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, leader_epoch, rktp->rktp_leader_epoch); - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE) { + if (rktp->rktp_fetch_state != + RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) { rd_kafka_toppar_unlock(rktp); rd_kafka_toppar_destroy(rktp); /* from get() */ return 0; } } - if (leader_epoch > rktp->rktp_leader_epoch) { + if (rktp->rktp_leader_epoch == -1 || + leader_epoch > rktp->rktp_leader_epoch) { + rd_bool_t fetching_from_follower; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", "%s [%" PRId32 "]: leader %" PRId32 " epoch %" PRId32 " -> leader %" PRId32 @@ -690,44 +709,50 @@ static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, rktp->rktp_leader_id, rktp->rktp_leader_epoch, leader_id, leader_epoch); - rktp->rktp_leader_epoch = leader_epoch; - need_epoch_validation = rd_true; - } else if (rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) + if (leader_epoch > rktp->rktp_leader_epoch) + rktp->rktp_leader_epoch = leader_epoch; need_epoch_validation = rd_true; - fetching_from_follower = - leader != NULL && rktp->rktp_broker != NULL && - rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL && - rktp->rktp_broker != leader; - if (fetching_from_follower && rktp->rktp_leader_id == leader_id) { - rd_kafka_dbg( - rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", - "Topic %s [%" PRId32 "]: leader %" PRId32 - " unchanged, " - "not migrating away from preferred replica %" PRId32, - 
rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-                   leader_id, rktp->rktp_broker_id);
-               r = 0;
+               fetching_from_follower =
+                   leader != NULL && rktp->rktp_broker != NULL &&
+                   rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL &&
+                   rktp->rktp_broker != leader;
 
-       } else {
+               if (fetching_from_follower &&
+                   rktp->rktp_leader_id == leader_id) {
+                       rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
+                                    "Topic %s [%" PRId32 "]: leader %" PRId32
+                                    " unchanged, "
+                                    "not migrating away from preferred "
+                                    "replica %" PRId32,
+                                    rktp->rktp_rkt->rkt_topic->str,
+                                    rktp->rktp_partition, leader_id,
+                                    rktp->rktp_broker_id);
+                       r = 0;
+
+               } else {
+
+                       if (rktp->rktp_leader_id != leader_id ||
+                           rktp->rktp_leader != leader) {
+                               /* Update leader if it has changed */
+                               rktp->rktp_leader_id = leader_id;
+                               if (rktp->rktp_leader)
+                                       rd_kafka_broker_destroy(
+                                           rktp->rktp_leader);
+                               if (leader)
+                                       rd_kafka_broker_keep(leader);
+                               rktp->rktp_leader = leader;
+                       }
 
-               if (rktp->rktp_leader_id != leader_id ||
-                   rktp->rktp_leader != leader) {
-                       /* Update leader if it has changed */
-                       rktp->rktp_leader_id = leader_id;
-                       if (rktp->rktp_leader)
-                               rd_kafka_broker_destroy(rktp->rktp_leader);
-                       if (leader)
-                               rd_kafka_broker_keep(leader);
-                       rktp->rktp_leader = leader;
+                       /* Update handling broker */
+                       r = rd_kafka_toppar_broker_update(
+                           rktp, leader_id, leader, "leader updated");
                }
 
-               /* Update handling broker */
-               r = rd_kafka_toppar_broker_update(rktp, leader_id, leader,
-                                                 "leader updated");
-       }
+       } else if (rktp->rktp_fetch_state ==
+                  RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT)
+               need_epoch_validation = rd_true;
 
        if (need_epoch_validation) {
                /* Set offset validation position,
@@ -1261,8 +1286,8 @@ rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt,
        rd_kafka_broker_t **partbrokers;
        int leader_cnt = 0;
        int old_state;
-       rd_bool_t partition_exists_with_no_leader_epoch      = rd_false;
-       rd_bool_t partition_exists_with_updated_leader_epoch = rd_false;
+       rd_bool_t partition_exists_with_no_leader_epoch    = rd_false;
+       rd_bool_t partition_exists_with_stale_leader_epoch = rd_false;
 
        if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR)
                rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA",
@@ -1297,9 +1322,10 @@ rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt,
        rkt->rkt_ts_metadata = ts_age;
 
        /* Set topic state.
-        * UNKNOWN_TOPIC_OR_PART may indicate that auto.create.topics failed */
+        * UNKNOWN_TOPIC_* may indicate that auto.create.topics failed */
        if (mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION /*invalid topic*/ ||
-           mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+           mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART ||
+           mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID)
                rd_kafka_topic_set_notexists(rkt, mdt->err);
        else if (mdt->partition_cnt > 0)
                rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_EXISTS);
@@ -1311,7 +1337,17 @@ rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt,
        if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) {
                upd += rd_kafka_topic_partition_cnt_update(rkt,
                                                           mdt->partition_cnt);
-
+               if (rd_kafka_Uuid_cmp(mdit->topic_id, RD_KAFKA_UUID_ZERO)) {
+                       /* FIXME: an offset reset must be triggered
+                        * when rkt_topic_id wasn't zero.
+                        * There are no problems
+                        * in test 0107_topic_recreate if offsets in new
+                        * topic are lower than in previous one,
+                        * causing an out of range and an offset reset,
+                        * but the rarer case where they're higher needs
+                        * to be checked. */
+                       rkt->rkt_topic_id = mdit->topic_id;
+               }
                /* If the metadata times out for a topic (because all brokers
                 * are down) the state will transition to S_UNKNOWN.
* When updated metadata is eventually received there might @@ -1325,7 +1361,7 @@ rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, /* Update leader for each partition */ for (j = 0; j < mdt->partition_cnt; j++) { - int r; + int r = 0; rd_kafka_broker_t *leader; int32_t leader_epoch = mdit->partitions[j].leader_epoch; rd_kafka_toppar_t *rktp = @@ -1344,8 +1380,8 @@ rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, * set to -1, we assume that metadata is not stale. */ if (leader_epoch == -1) partition_exists_with_no_leader_epoch = rd_true; - else if (rktp->rktp_leader_epoch < leader_epoch) - partition_exists_with_updated_leader_epoch = rd_true; + else if (leader_epoch < rktp->rktp_leader_epoch) + partition_exists_with_stale_leader_epoch = rd_true; /* Update leader for partition */ @@ -1368,7 +1404,7 @@ rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, * stale, we can turn off fast leader query. */ if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt && (partition_exists_with_no_leader_epoch || - partition_exists_with_updated_leader_epoch)) + !partition_exists_with_stale_leader_epoch)) rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) { @@ -1419,8 +1455,15 @@ int rd_kafka_topic_metadata_update2( int r; rd_kafka_wrlock(rkb->rkb_rk); - if (!(rkt = - rd_kafka_topic_find(rkb->rkb_rk, mdt->topic, 0 /*!lock*/))) { + + if (likely(mdt->topic != NULL)) { + rkt = rd_kafka_topic_find(rkb->rkb_rk, mdt->topic, 0 /*!lock*/); + } else { + rkt = rd_kafka_topic_find_by_topic_id(rkb->rkb_rk, + mdit->topic_id); + } + + if (!rkt) { rd_kafka_wrunlock(rkb->rkb_rk); return -1; /* Ignore topics that we dont have locally. */ } @@ -2021,7 +2064,7 @@ void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt, rd_kafka_wrlock(rkt->rkt_rk); rd_kafka_metadata_cache_topic_update(rkt->rkt_rk, &mdt, &mdit, rd_true, - rd_false, NULL, 0); + rd_false, NULL, 0, rd_false); rd_kafka_topic_metadata_update(rkt, &mdt, &mdit, rd_clock()); rd_kafka_wrunlock(rkt->rkt_rk); rd_free(partitions); diff --git a/lib/librdkafka-2.3.0/src/rdkafka_topic.h b/lib/librdkafka-2.4.0/src/rdkafka_topic.h similarity index 96% rename from lib/librdkafka-2.3.0/src/rdkafka_topic.h rename to lib/librdkafka-2.4.0/src/rdkafka_topic.h index b8c0b66c99e..6e25e7f74eb 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_topic.h +++ b/lib/librdkafka-2.4.0/src/rdkafka_topic.h @@ -109,6 +109,16 @@ typedef struct rd_kafka_partition_leader_epoch_s { int32_t leader_epoch; } rd_kafka_partition_leader_epoch_t; +/** + * Finds and returns a topic based on its topic_id, or NULL if not found. + * The 'rkt' refcount is increased by one and the caller must call + * rd_kafka_topic_destroy() when it is done with the topic to decrease + * the refcount. + * + * Locality: any thread + */ +rd_kafka_topic_t *rd_kafka_topic_find_by_topic_id(rd_kafka_t *rk, + rd_kafka_Uuid_t topic_id); /* * @struct Internal representation of a topic. 
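/*
 * Editorial sketch (not part of the upstream patch) of the rule the rename
 * above encodes: cached metadata is considered stale for a partition only
 * when the broker reports a known leader epoch (!= -1) that is older than
 * the one already cached; the fast-leader-query flag is cleared once every
 * partition has a leader and none is stale (or epochs are unsupported).
 * The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static rd_bool_t example_leader_epoch_is_stale(int32_t cached_epoch,
                                               int32_t reported_epoch) {
        /* -1 means the broker did not report an epoch at all. */
        return reported_epoch != -1 && reported_epoch < cached_epoch;
}
#endif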
@@ -124,6 +134,7 @@ struct rd_kafka_topic_s { rwlock_t rkt_lock; rd_kafkap_str_t *rkt_topic; + rd_kafka_Uuid_t rkt_topic_id; rd_kafka_toppar_t *rkt_ua; /**< Unassigned partition (-1) */ rd_kafka_toppar_t **rkt_p; /**< Partition array */ diff --git a/lib/librdkafka-2.3.0/src/rdkafka_transport.c b/lib/librdkafka-2.4.0/src/rdkafka_transport.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_transport.c rename to lib/librdkafka-2.4.0/src/rdkafka_transport.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_transport.h b/lib/librdkafka-2.4.0/src/rdkafka_transport.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_transport.h rename to lib/librdkafka-2.4.0/src/rdkafka_transport.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_transport_int.h b/lib/librdkafka-2.4.0/src/rdkafka_transport_int.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_transport_int.h rename to lib/librdkafka-2.4.0/src/rdkafka_transport_int.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_txnmgr.c b/lib/librdkafka-2.4.0/src/rdkafka_txnmgr.c similarity index 99% rename from lib/librdkafka-2.3.0/src/rdkafka_txnmgr.c rename to lib/librdkafka-2.4.0/src/rdkafka_txnmgr.c index cd8a60f30ad..90d330146fe 100644 --- a/lib/librdkafka-2.3.0/src/rdkafka_txnmgr.c +++ b/lib/librdkafka-2.4.0/src/rdkafka_txnmgr.c @@ -1500,7 +1500,8 @@ static void rd_kafka_txn_handle_TxnOffsetCommit(rd_kafka_t *rk, RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - partitions = rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields); + partitions = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields); if (!partitions) goto err_parse; @@ -1716,7 +1717,8 @@ rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb, RD_KAFKA_TOPIC_PARTITION_FIELD_END}; cnt = rd_kafka_buf_write_topic_partitions( rkbuf, rko->rko_u.txn.offsets, rd_true /*skip invalid offsets*/, - rd_false /*any offset*/, fields); + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); if (!cnt) { /* No valid partition offsets, don't commit. 
*/ rd_kafka_buf_destroy(rkbuf); diff --git a/lib/librdkafka-2.3.0/src/rdkafka_txnmgr.h b/lib/librdkafka-2.4.0/src/rdkafka_txnmgr.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_txnmgr.h rename to lib/librdkafka-2.4.0/src/rdkafka_txnmgr.h diff --git a/lib/librdkafka-2.3.0/src/rdkafka_zstd.c b/lib/librdkafka-2.4.0/src/rdkafka_zstd.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_zstd.c rename to lib/librdkafka-2.4.0/src/rdkafka_zstd.c diff --git a/lib/librdkafka-2.3.0/src/rdkafka_zstd.h b/lib/librdkafka-2.4.0/src/rdkafka_zstd.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdkafka_zstd.h rename to lib/librdkafka-2.4.0/src/rdkafka_zstd.h diff --git a/lib/librdkafka-2.3.0/src/rdlist.c b/lib/librdkafka-2.4.0/src/rdlist.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdlist.c rename to lib/librdkafka-2.4.0/src/rdlist.c diff --git a/lib/librdkafka-2.3.0/src/rdlist.h b/lib/librdkafka-2.4.0/src/rdlist.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdlist.h rename to lib/librdkafka-2.4.0/src/rdlist.h diff --git a/lib/librdkafka-2.3.0/src/rdlog.c b/lib/librdkafka-2.4.0/src/rdlog.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdlog.c rename to lib/librdkafka-2.4.0/src/rdlog.c diff --git a/lib/librdkafka-2.3.0/src/rdlog.h b/lib/librdkafka-2.4.0/src/rdlog.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdlog.h rename to lib/librdkafka-2.4.0/src/rdlog.h diff --git a/lib/librdkafka-2.3.0/src/rdmap.c b/lib/librdkafka-2.4.0/src/rdmap.c similarity index 97% rename from lib/librdkafka-2.3.0/src/rdmap.c rename to lib/librdkafka-2.4.0/src/rdmap.c index 8e1a0546cc5..1e82bcb9a2b 100644 --- a/lib/librdkafka-2.3.0/src/rdmap.c +++ b/lib/librdkafka-2.4.0/src/rdmap.c @@ -2,6 +2,7 @@ * librdkafka - The Apache Kafka C/C++ library * * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -237,6 +238,21 @@ unsigned int rd_map_str_hash(const void *key) { } +/** + * @returns a djb2 hash of \p bytes. + * + * @param len \p bytes will be hashed up to \p len. + */ +unsigned int rd_bytes_hash(unsigned char *bytes, size_t len) { + unsigned int hash = 5381; + size_t i; + + for (i = 0; i < len; i++) + hash = ((hash << 5) + hash) + bytes[i]; + + return hash; +} + /** * @name Unit tests diff --git a/lib/librdkafka-2.3.0/src/rdmap.h b/lib/librdkafka-2.4.0/src/rdmap.h similarity index 99% rename from lib/librdkafka-2.3.0/src/rdmap.h rename to lib/librdkafka-2.4.0/src/rdmap.h index bea8a1aca6d..b8e3feb97bf 100644 --- a/lib/librdkafka-2.3.0/src/rdmap.h +++ b/lib/librdkafka-2.4.0/src/rdmap.h @@ -2,6 +2,7 @@ * librdkafka - The Apache Kafka C/C++ library * * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -249,6 +250,10 @@ int rd_map_str_cmp(const void *a, const void *b); */ unsigned int rd_map_str_hash(const void *a); +/** + * @brief Bytes hash function (djb2). 
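+ *
+ * Illustrative usage (editor's sketch, not upstream documentation):
+ * @code
+ *   unsigned char key[16] = {0};  // e.g. a 16-byte topic id (hypothetical)
+ *   unsigned int h = rd_bytes_hash(key, sizeof(key)); // djb2: h = h*33 + b
+ * @endcode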
+ */ +unsigned int rd_bytes_hash(unsigned char *bytes, size_t len); /** diff --git a/lib/librdkafka-2.3.0/src/rdmurmur2.c b/lib/librdkafka-2.4.0/src/rdmurmur2.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdmurmur2.c rename to lib/librdkafka-2.4.0/src/rdmurmur2.c diff --git a/lib/librdkafka-2.3.0/src/rdmurmur2.h b/lib/librdkafka-2.4.0/src/rdmurmur2.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdmurmur2.h rename to lib/librdkafka-2.4.0/src/rdmurmur2.h diff --git a/lib/librdkafka-2.3.0/src/rdports.c b/lib/librdkafka-2.4.0/src/rdports.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdports.c rename to lib/librdkafka-2.4.0/src/rdports.c diff --git a/lib/librdkafka-2.3.0/src/rdports.h b/lib/librdkafka-2.4.0/src/rdports.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdports.h rename to lib/librdkafka-2.4.0/src/rdports.h diff --git a/lib/librdkafka-2.3.0/src/rdposix.h b/lib/librdkafka-2.4.0/src/rdposix.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdposix.h rename to lib/librdkafka-2.4.0/src/rdposix.h diff --git a/lib/librdkafka-2.3.0/src/rdrand.c b/lib/librdkafka-2.4.0/src/rdrand.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdrand.c rename to lib/librdkafka-2.4.0/src/rdrand.c diff --git a/lib/librdkafka-2.3.0/src/rdrand.h b/lib/librdkafka-2.4.0/src/rdrand.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdrand.h rename to lib/librdkafka-2.4.0/src/rdrand.h diff --git a/lib/librdkafka-2.3.0/src/rdregex.c b/lib/librdkafka-2.4.0/src/rdregex.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdregex.c rename to lib/librdkafka-2.4.0/src/rdregex.c diff --git a/lib/librdkafka-2.3.0/src/rdregex.h b/lib/librdkafka-2.4.0/src/rdregex.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdregex.h rename to lib/librdkafka-2.4.0/src/rdregex.h diff --git a/lib/librdkafka-2.3.0/src/rdsignal.h b/lib/librdkafka-2.4.0/src/rdsignal.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdsignal.h rename to lib/librdkafka-2.4.0/src/rdsignal.h diff --git a/lib/librdkafka-2.3.0/src/rdstring.c b/lib/librdkafka-2.4.0/src/rdstring.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdstring.c rename to lib/librdkafka-2.4.0/src/rdstring.c diff --git a/lib/librdkafka-2.3.0/src/rdstring.h b/lib/librdkafka-2.4.0/src/rdstring.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdstring.h rename to lib/librdkafka-2.4.0/src/rdstring.h diff --git a/lib/librdkafka-2.3.0/src/rdsysqueue.h b/lib/librdkafka-2.4.0/src/rdsysqueue.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdsysqueue.h rename to lib/librdkafka-2.4.0/src/rdsysqueue.h diff --git a/lib/librdkafka-2.3.0/src/rdtime.h b/lib/librdkafka-2.4.0/src/rdtime.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdtime.h rename to lib/librdkafka-2.4.0/src/rdtime.h diff --git a/lib/librdkafka-2.3.0/src/rdtypes.h b/lib/librdkafka-2.4.0/src/rdtypes.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdtypes.h rename to lib/librdkafka-2.4.0/src/rdtypes.h diff --git a/lib/librdkafka-2.3.0/src/rdunittest.c b/lib/librdkafka-2.4.0/src/rdunittest.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdunittest.c rename to lib/librdkafka-2.4.0/src/rdunittest.c diff --git a/lib/librdkafka-2.3.0/src/rdunittest.h b/lib/librdkafka-2.4.0/src/rdunittest.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdunittest.h rename to lib/librdkafka-2.4.0/src/rdunittest.h diff --git 
a/lib/librdkafka-2.3.0/src/rdvarint.c b/lib/librdkafka-2.4.0/src/rdvarint.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdvarint.c rename to lib/librdkafka-2.4.0/src/rdvarint.c diff --git a/lib/librdkafka-2.3.0/src/rdvarint.h b/lib/librdkafka-2.4.0/src/rdvarint.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdvarint.h rename to lib/librdkafka-2.4.0/src/rdvarint.h diff --git a/lib/librdkafka-2.3.0/src/rdwin32.h b/lib/librdkafka-2.4.0/src/rdwin32.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdwin32.h rename to lib/librdkafka-2.4.0/src/rdwin32.h diff --git a/lib/librdkafka-2.3.0/src/rdxxhash.c b/lib/librdkafka-2.4.0/src/rdxxhash.c similarity index 100% rename from lib/librdkafka-2.3.0/src/rdxxhash.c rename to lib/librdkafka-2.4.0/src/rdxxhash.c diff --git a/lib/librdkafka-2.3.0/src/rdxxhash.h b/lib/librdkafka-2.4.0/src/rdxxhash.h similarity index 100% rename from lib/librdkafka-2.3.0/src/rdxxhash.h rename to lib/librdkafka-2.4.0/src/rdxxhash.h diff --git a/lib/librdkafka-2.3.0/src/regexp.c b/lib/librdkafka-2.4.0/src/regexp.c similarity index 100% rename from lib/librdkafka-2.3.0/src/regexp.c rename to lib/librdkafka-2.4.0/src/regexp.c diff --git a/lib/librdkafka-2.3.0/src/regexp.h b/lib/librdkafka-2.4.0/src/regexp.h similarity index 100% rename from lib/librdkafka-2.3.0/src/regexp.h rename to lib/librdkafka-2.4.0/src/regexp.h diff --git a/lib/librdkafka-2.3.0/src/snappy.c b/lib/librdkafka-2.4.0/src/snappy.c similarity index 100% rename from lib/librdkafka-2.3.0/src/snappy.c rename to lib/librdkafka-2.4.0/src/snappy.c diff --git a/lib/librdkafka-2.3.0/src/snappy.h b/lib/librdkafka-2.4.0/src/snappy.h similarity index 100% rename from lib/librdkafka-2.3.0/src/snappy.h rename to lib/librdkafka-2.4.0/src/snappy.h diff --git a/lib/librdkafka-2.3.0/src/snappy_compat.h b/lib/librdkafka-2.4.0/src/snappy_compat.h similarity index 100% rename from lib/librdkafka-2.3.0/src/snappy_compat.h rename to lib/librdkafka-2.4.0/src/snappy_compat.h diff --git a/lib/librdkafka-2.3.0/src/statistics_schema.json b/lib/librdkafka-2.4.0/src/statistics_schema.json similarity index 100% rename from lib/librdkafka-2.3.0/src/statistics_schema.json rename to lib/librdkafka-2.4.0/src/statistics_schema.json diff --git a/lib/librdkafka-2.3.0/src/tinycthread.c b/lib/librdkafka-2.4.0/src/tinycthread.c similarity index 100% rename from lib/librdkafka-2.3.0/src/tinycthread.c rename to lib/librdkafka-2.4.0/src/tinycthread.c diff --git a/lib/librdkafka-2.3.0/src/tinycthread.h b/lib/librdkafka-2.4.0/src/tinycthread.h similarity index 100% rename from lib/librdkafka-2.3.0/src/tinycthread.h rename to lib/librdkafka-2.4.0/src/tinycthread.h diff --git a/lib/librdkafka-2.3.0/src/tinycthread_extra.c b/lib/librdkafka-2.4.0/src/tinycthread_extra.c similarity index 100% rename from lib/librdkafka-2.3.0/src/tinycthread_extra.c rename to lib/librdkafka-2.4.0/src/tinycthread_extra.c diff --git a/lib/librdkafka-2.3.0/src/tinycthread_extra.h b/lib/librdkafka-2.4.0/src/tinycthread_extra.h similarity index 100% rename from lib/librdkafka-2.3.0/src/tinycthread_extra.h rename to lib/librdkafka-2.4.0/src/tinycthread_extra.h diff --git a/lib/librdkafka-2.3.0/src/win32_config.h b/lib/librdkafka-2.4.0/src/win32_config.h similarity index 100% rename from lib/librdkafka-2.3.0/src/win32_config.h rename to lib/librdkafka-2.4.0/src/win32_config.h diff --git a/lib/librdkafka-2.3.0/tests/.gitignore b/lib/librdkafka-2.4.0/tests/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/tests/.gitignore rename to 
lib/librdkafka-2.4.0/tests/.gitignore diff --git a/lib/librdkafka-2.3.0/tests/0000-unittests.c b/lib/librdkafka-2.4.0/tests/0000-unittests.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0000-unittests.c rename to lib/librdkafka-2.4.0/tests/0000-unittests.c diff --git a/lib/librdkafka-2.3.0/tests/0001-multiobj.c b/lib/librdkafka-2.4.0/tests/0001-multiobj.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0001-multiobj.c rename to lib/librdkafka-2.4.0/tests/0001-multiobj.c diff --git a/lib/librdkafka-2.3.0/tests/0002-unkpart.c b/lib/librdkafka-2.4.0/tests/0002-unkpart.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0002-unkpart.c rename to lib/librdkafka-2.4.0/tests/0002-unkpart.c diff --git a/lib/librdkafka-2.3.0/tests/0003-msgmaxsize.c b/lib/librdkafka-2.4.0/tests/0003-msgmaxsize.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0003-msgmaxsize.c rename to lib/librdkafka-2.4.0/tests/0003-msgmaxsize.c diff --git a/lib/librdkafka-2.3.0/tests/0004-conf.c b/lib/librdkafka-2.4.0/tests/0004-conf.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0004-conf.c rename to lib/librdkafka-2.4.0/tests/0004-conf.c diff --git a/lib/librdkafka-2.3.0/tests/0005-order.c b/lib/librdkafka-2.4.0/tests/0005-order.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0005-order.c rename to lib/librdkafka-2.4.0/tests/0005-order.c diff --git a/lib/librdkafka-2.3.0/tests/0006-symbols.c b/lib/librdkafka-2.4.0/tests/0006-symbols.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0006-symbols.c rename to lib/librdkafka-2.4.0/tests/0006-symbols.c diff --git a/lib/librdkafka-2.3.0/tests/0007-autotopic.c b/lib/librdkafka-2.4.0/tests/0007-autotopic.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0007-autotopic.c rename to lib/librdkafka-2.4.0/tests/0007-autotopic.c diff --git a/lib/librdkafka-2.3.0/tests/0008-reqacks.c b/lib/librdkafka-2.4.0/tests/0008-reqacks.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0008-reqacks.c rename to lib/librdkafka-2.4.0/tests/0008-reqacks.c diff --git a/lib/librdkafka-2.3.0/tests/0009-mock_cluster.c b/lib/librdkafka-2.4.0/tests/0009-mock_cluster.c similarity index 96% rename from lib/librdkafka-2.3.0/tests/0009-mock_cluster.c rename to lib/librdkafka-2.4.0/tests/0009-mock_cluster.c index 23a953fbe3a..07ab0e88649 100644 --- a/lib/librdkafka-2.3.0/tests/0009-mock_cluster.c +++ b/lib/librdkafka-2.4.0/tests/0009-mock_cluster.c @@ -46,10 +46,7 @@ int main_0009_mock_cluster(int argc, char **argv) { const char *bootstraps; rd_kafka_topic_partition_list_t *parts; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); mcluster = test_mock_cluster_new(3, &bootstraps); diff --git a/lib/librdkafka-2.3.0/tests/0011-produce_batch.c b/lib/librdkafka-2.4.0/tests/0011-produce_batch.c similarity index 75% rename from lib/librdkafka-2.3.0/tests/0011-produce_batch.c rename to lib/librdkafka-2.4.0/tests/0011-produce_batch.c index fd8d2e2d474..1507d76f9e7 100644 --- a/lib/librdkafka-2.3.0/tests/0011-produce_batch.c +++ b/lib/librdkafka-2.4.0/tests/0011-produce_batch.c @@ -38,13 +38,16 @@ #include "rdkafka.h" /* for Kafka driver */ -static int msgid_next = 0; -static int fails = 0; -static int msgcounter = 0; -static int *dr_partition_count = NULL; -static const int topic_num_partitions = 4; -static int msg_partition_wo_flag = 2; -static int msg_partition_wo_flag_success = 0; +static int msgid_next = 0; +static int fails = 
0; +static int msgcounter = 0; +static int *dr_partition_count = NULL; +static const int topic_num_partitions = 4; +static int msg_partition_wo_flag = 2; +static int msg_partition_wo_flag_success = 0; +static int invalid_record_fail_cnt = 0; +static int invalid_different_record_fail_cnt = 0; +static int valid_message_cnt = 0; /** * Delivery reported callback. @@ -88,6 +91,8 @@ static void test_single_partition(void) { int i; rd_kafka_message_t *rkmessages; + SUB_TEST_QUICK(); + msgid_next = 0; test_conf_init(&conf, &topic_conf, 20); @@ -173,7 +178,7 @@ static void test_single_partition(void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } @@ -218,6 +223,8 @@ static void test_partitioner(void) { int i; rd_kafka_message_t *rkmessages; + SUB_TEST_QUICK(); + test_conf_init(&conf, &topic_conf, 30); /* Set delivery report callback */ @@ -297,7 +304,7 @@ static void test_partitioner(void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } static void dr_per_message_partition_cb(rd_kafka_t *rk, @@ -338,6 +345,8 @@ static void test_per_message_partition_flag(void) { rd_kafka_message_t *rkmessages; const char *topic_name; + SUB_TEST_QUICK(); + test_conf_init(&conf, &topic_conf, 30); /* Set delivery report callback */ @@ -435,7 +444,7 @@ static void test_per_message_partition_flag(void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } static void @@ -474,6 +483,8 @@ static void test_message_partitioner_wo_per_message_flag(void) { int i; rd_kafka_message_t *rkmessages; + SUB_TEST_QUICK(); + test_conf_init(&conf, &topic_conf, 30); /* Set delivery report callback */ @@ -562,7 +573,161 @@ static void test_message_partitioner_wo_per_message_flag(void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); +} + +static void +dr_message_single_partition_record_fail(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { + free(rkmessage->_private); + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR_INVALID_RECORD) + invalid_record_fail_cnt++; + else if (rkmessage->err == + RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD) + invalid_different_record_fail_cnt++; + } else { + valid_message_cnt++; + } + msgcounter--; +} + +/** + * @brief Some messages fail because of INVALID_RECORD: compacted topic + * but no key was sent. 
+ *
+ * - variation 0: they're in the same batch, rest of messages
+ *   fail with _INVALID_DIFFERENT_RECORD
+ * - variation 1: one message per batch, other messages succeed
+ */
+static void test_message_single_partition_record_fail(int variation) {
+        int partition = 0;
+        int r;
+        rd_kafka_t *rk;
+        rd_kafka_topic_t *rkt;
+        rd_kafka_conf_t *conf;
+        rd_kafka_topic_conf_t *topic_conf;
+        char msg[128];
+        int msgcnt = 100;
+        int failcnt = 0;
+        int i;
+        rd_kafka_message_t *rkmessages;
+        const char *topic_name = test_mk_topic_name(__FUNCTION__, 1);
+        invalid_record_fail_cnt           = 0;
+        invalid_different_record_fail_cnt = 0;
+
+        SUB_TEST_QUICK();
+
+        const char *confs_set_append[] = {"cleanup.policy", "APPEND",
+                                          "compact"};
+
+        const char *confs_delete_subtract[] = {"cleanup.policy", "SUBTRACT",
+                                               "compact"};
+
+        test_conf_init(&conf, &topic_conf, 20);
+        if (variation == 1)
+                test_conf_set(conf, "batch.size", "1");
+
+        /* Set delivery report callback */
+        rd_kafka_conf_set_dr_msg_cb(conf,
+                                    dr_message_single_partition_record_fail);
+
+
+        /* Create kafka instance */
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        TEST_SAY(
+            "test_message_single_partition_record_fail: Created kafka instance "
+            "%s\n",
+            rd_kafka_name(rk));
+
+        rkt = rd_kafka_topic_new(rk, topic_name, topic_conf);
+        if (!rkt)
+                TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+        test_wait_topic_exists(rk, topic_name, 5000);
+
+        test_IncrementalAlterConfigs_simple(rk, RD_KAFKA_RESOURCE_TOPIC,
+                                            topic_name, confs_set_append, 1);
+        rd_sleep(1);
+
+
+        /* Create messages */
+        rkmessages = calloc(msgcnt, sizeof(*rkmessages));
+        for (i = 0; i < msgcnt; i++) {
+                int *msgidp = malloc(sizeof(*msgidp));
+
+                *msgidp = i;
+                rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
+                            __FILE__, __FUNCTION__, i);
+                if (i % 10 == 0) {
+                        rkmessages[i].payload = rd_strdup(msg);
+                        rkmessages[i].len     = strlen(msg);
+
+                } else {
+                        rkmessages[i].payload = rd_strdup(msg);
+                        rkmessages[i].len     = strlen(msg);
+                        rkmessages[i].key     = rd_strdup(msg);
+                        rkmessages[i].key_len = strlen(msg);
+                }
+                rkmessages[i]._private  = msgidp;
+                rkmessages[i].partition = 2;
+        }
+
+        r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
+                                   rkmessages, msgcnt);
+
+        if (r < msgcnt) {
+                TEST_SAY(
+                    "Not all messages were accepted "
+                    "by produce_batch(): %i < %i\n",
+                    r, msgcnt);
+                if (msgcnt - r != failcnt)
+                        TEST_SAY(
+                            "Discrepancy between failed messages (%i) "
+                            "and return value %i (%i - %i)\n",
+                            failcnt, msgcnt - r, msgcnt, r);
+                TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
+        }
+
+        for (i = 0; i < msgcnt; i++)
+                free(rkmessages[i].key);
+        free(rkmessages);
+        TEST_SAY(
+            "test_message_single_partition_record_fail: "
+            "Produced %i messages, waiting for deliveries\n",
+            r);
+
+        msgcounter = msgcnt;
+
+        /* Wait for messages to be delivered */
+        test_wait_delivery(rk, &msgcounter);
+        TEST_SAY(
+            "invalid_record_fail_cnt: %d invalid_different_record_fail_cnt: "
+            "%d\n",
+            invalid_record_fail_cnt, invalid_different_record_fail_cnt);
+        TEST_ASSERT(invalid_record_fail_cnt == 10);
+        if (variation == 0)
+                TEST_ASSERT(invalid_different_record_fail_cnt == 90);
+        else if (variation == 1)
+                TEST_ASSERT(valid_message_cnt == 90);
+
+        test_IncrementalAlterConfigs_simple(
+            rk, RD_KAFKA_RESOURCE_TOPIC, topic_name, confs_delete_subtract, 1);
+
+        if (fails)
+                TEST_FAIL("%i failures, see previous errors", fails);
+
+
+        /* Destroy topic */
+        rd_kafka_topic_destroy(rkt);
+
+        test_DeleteTopics_simple(rk, NULL, (char **)&topic_name, 1, NULL);
+
+        /* Destroy
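/*
 * Illustrative sketch (editor's addition, not from the upstream patch): how
 * the two error codes exercised by the test above surface in an application's
 * delivery report callback. Public API only; the callback name is
 * hypothetical.
 */
#if 0 /* illustrative only */
static void example_dr_msg_cb(rd_kafka_t *rk,
                              const rd_kafka_message_t *rkmessage,
                              void *opaque) {
        switch (rkmessage->err) {
        case RD_KAFKA_RESP_ERR_INVALID_RECORD:
                /* The broker rejected this specific record, e.g. a
                 * keyless record produced to a compacted topic. */
                break;
        case RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD:
                /* This record was valid but shared a batch with a
                 * rejected one; the application may retry it. */
                break;
        default:
                break;
        }
}
#endif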
rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + SUB_TEST_PASS(); } @@ -572,5 +737,8 @@ int main_0011_produce_batch(int argc, char **argv) { test_partitioner(); if (test_can_create_topics(1)) test_per_message_partition_flag(); + + test_message_single_partition_record_fail(0); + test_message_single_partition_record_fail(1); return 0; } diff --git a/lib/librdkafka-2.3.0/tests/0012-produce_consume.c b/lib/librdkafka-2.4.0/tests/0012-produce_consume.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0012-produce_consume.c rename to lib/librdkafka-2.4.0/tests/0012-produce_consume.c diff --git a/lib/librdkafka-2.3.0/tests/0013-null-msgs.c b/lib/librdkafka-2.4.0/tests/0013-null-msgs.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0013-null-msgs.c rename to lib/librdkafka-2.4.0/tests/0013-null-msgs.c diff --git a/lib/librdkafka-2.3.0/tests/0014-reconsume-191.c b/lib/librdkafka-2.4.0/tests/0014-reconsume-191.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0014-reconsume-191.c rename to lib/librdkafka-2.4.0/tests/0014-reconsume-191.c diff --git a/lib/librdkafka-2.3.0/tests/0015-offset_seeks.c b/lib/librdkafka-2.4.0/tests/0015-offset_seeks.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0015-offset_seeks.c rename to lib/librdkafka-2.4.0/tests/0015-offset_seeks.c diff --git a/lib/librdkafka-2.3.0/tests/0016-client_swname.c b/lib/librdkafka-2.4.0/tests/0016-client_swname.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0016-client_swname.c rename to lib/librdkafka-2.4.0/tests/0016-client_swname.c index f8b2cf60741..335925e328d 100644 --- a/lib/librdkafka-2.3.0/tests/0016-client_swname.c +++ b/lib/librdkafka-2.4.0/tests/0016-client_swname.c @@ -141,7 +141,7 @@ int main_0016_client_swname(int argc, char **argv) { "%s/bin/kafka-run-class.sh kafka.tools.JmxTool " "--jmx-url " "service:jmx:rmi:///jndi/rmi://:%s/jmxrmi " - "--attributes connections --one-time true | " + " --one-time true | " "grep clientSoftware", kafka_path, jmx_port); diff --git a/lib/librdkafka-2.3.0/tests/0017-compression.c b/lib/librdkafka-2.4.0/tests/0017-compression.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0017-compression.c rename to lib/librdkafka-2.4.0/tests/0017-compression.c diff --git a/lib/librdkafka-2.3.0/tests/0018-cgrp_term.c b/lib/librdkafka-2.4.0/tests/0018-cgrp_term.c similarity index 99% rename from lib/librdkafka-2.3.0/tests/0018-cgrp_term.c rename to lib/librdkafka-2.4.0/tests/0018-cgrp_term.c index 99a98df4fd5..85ac5612fb5 100644 --- a/lib/librdkafka-2.3.0/tests/0018-cgrp_term.c +++ b/lib/librdkafka-2.4.0/tests/0018-cgrp_term.c @@ -260,7 +260,8 @@ static void do_test(rd_bool_t with_queue) { /* Let remaining consumers run for a while to take over the now * lost partitions. 
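The new 0011 subtest registered above leans on two librdkafka idioms: rd_kafka_produce_batch() to enqueue an array of rd_kafka_message_t in one call, and a delivery-report callback that classifies per-message errors. A minimal standalone sketch of that pattern follows; the broker address, topic name and message count are placeholders and error handling is trimmed:

```c
#include <stdio.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

static int remaining; /* messages whose delivery report is still pending */

/* Per-message delivery report: rkm->err says why a message failed,
 * e.g. RD_KAFKA_RESP_ERR_INVALID_RECORD for a keyless message on a
 * compacted topic, as in the test above. */
static void dr_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkm, void *opaque) {
        if (rkm->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(rkm->err));
        remaining--;
}

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set_dr_msg_cb(conf, dr_cb);

        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                      errstr, sizeof(errstr));
        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "test-topic", NULL);

        rd_kafka_message_t msgs[10];
        memset(msgs, 0, sizeof(msgs));
        for (int i = 0; i < 10; i++) {
                msgs[i].payload = (void *)"hello";
                msgs[i].len     = 5;
        }
        /* Enqueue the whole array; returns how many were accepted. */
        remaining = rd_kafka_produce_batch(rkt, 0 /*partition*/,
                                           RD_KAFKA_MSG_F_COPY, msgs, 10);
        while (remaining > 0)
                rd_kafka_poll(rk, 100); /* serve delivery reports */

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);
        return 0;
}
```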
*/ - if (assign_cnt != _CONS_CNT - 1) + if (test_consumer_group_protocol_generic() && + assign_cnt != _CONS_CNT - 1) TEST_FAIL("assign_cnt %d, should be %d\n", assign_cnt, _CONS_CNT - 1); diff --git a/lib/librdkafka-2.3.0/tests/0019-list_groups.c b/lib/librdkafka-2.4.0/tests/0019-list_groups.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0019-list_groups.c rename to lib/librdkafka-2.4.0/tests/0019-list_groups.c diff --git a/lib/librdkafka-2.3.0/tests/0020-destroy_hang.c b/lib/librdkafka-2.4.0/tests/0020-destroy_hang.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0020-destroy_hang.c rename to lib/librdkafka-2.4.0/tests/0020-destroy_hang.c diff --git a/lib/librdkafka-2.3.0/tests/0021-rkt_destroy.c b/lib/librdkafka-2.4.0/tests/0021-rkt_destroy.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0021-rkt_destroy.c rename to lib/librdkafka-2.4.0/tests/0021-rkt_destroy.c diff --git a/lib/librdkafka-2.3.0/tests/0022-consume_batch.c b/lib/librdkafka-2.4.0/tests/0022-consume_batch.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0022-consume_batch.c rename to lib/librdkafka-2.4.0/tests/0022-consume_batch.c index 5deccc378fd..97d709201bc 100644 --- a/lib/librdkafka-2.3.0/tests/0022-consume_batch.c +++ b/lib/librdkafka-2.4.0/tests/0022-consume_batch.c @@ -259,7 +259,9 @@ static void do_test_consume_batch_non_existent_topic(void) { int main_0022_consume_batch(int argc, char **argv) { do_test_consume_batch(); - do_test_consume_batch_non_existent_topic(); + if (test_consumer_group_protocol_generic()) { + do_test_consume_batch_non_existent_topic(); + } return 0; } diff --git a/lib/librdkafka-2.3.0/tests/0025-timers.c b/lib/librdkafka-2.4.0/tests/0025-timers.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0025-timers.c rename to lib/librdkafka-2.4.0/tests/0025-timers.c diff --git a/lib/librdkafka-2.3.0/tests/0026-consume_pause.c b/lib/librdkafka-2.4.0/tests/0026-consume_pause.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0026-consume_pause.c rename to lib/librdkafka-2.4.0/tests/0026-consume_pause.c index dfac4572d99..53f27ce11bf 100644 --- a/lib/librdkafka-2.3.0/tests/0026-consume_pause.c +++ b/lib/librdkafka-2.4.0/tests/0026-consume_pause.c @@ -64,6 +64,8 @@ static void consume_pause(void) { test_create_topic(NULL, topic, partition_cnt, 1); + test_wait_topic_exists(NULL, topic, 10 * 1000); + /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); @@ -260,6 +262,8 @@ static void consume_pause_resume_after_reassign(void) { test_create_topic(NULL, topic, (int)partition + 1, 1); + test_wait_topic_exists(NULL, topic, 10 * 1000); + /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); @@ -417,6 +421,8 @@ static void consume_subscribe_assign_pause_resume(void) { test_create_topic(NULL, topic, (int)partition + 1, 1); + test_wait_topic_exists(NULL, topic, 10 * 1000); + /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); @@ -468,6 +474,8 @@ static void consume_seek_pause_resume(void) { test_create_topic(NULL, topic, (int)partition + 1, 1); + test_wait_topic_exists(NULL, topic, 10 * 1000); + /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); diff --git a/lib/librdkafka-2.3.0/tests/0028-long_topicnames.c b/lib/librdkafka-2.4.0/tests/0028-long_topicnames.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0028-long_topicnames.c rename to 
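The test_wait_topic_exists() calls added throughout 0026 above address a race: test_create_topic() returns as soon as the controller accepts the request, while produces can still fail with unknown-topic errors until metadata propagates. The helper itself lives in the test harness; a rough standalone equivalent built on the public metadata API might look like this (polling cadence and structure are assumptions, not the real helper):

```c
#include <unistd.h>
#include <librdkafka/rdkafka.h>

/* Poll cluster metadata until the topic shows up or the timeout expires.
 * Returns 1 if the topic exists, else 0. */
static int my_wait_topic_exists(rd_kafka_t *rk, const char *topic,
                                int timeout_ms) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic, NULL);
        int found = 0;

        while (!found && timeout_ms > 0) {
                const struct rd_kafka_metadata *md;
                if (rd_kafka_metadata(rk, 0, rkt /*this topic only*/, &md,
                                      1000) == RD_KAFKA_RESP_ERR_NO_ERROR) {
                        found = md->topic_cnt == 1 &&
                                md->topics[0].err ==
                                    RD_KAFKA_RESP_ERR_NO_ERROR;
                        rd_kafka_metadata_destroy(md);
                }
                if (!found) {
                        usleep(500 * 1000);
                        timeout_ms -= 1500; /* request + sleep, roughly */
                }
        }
        rd_kafka_topic_destroy(rkt);
        return found;
}
```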
lib/librdkafka-2.4.0/tests/0028-long_topicnames.c diff --git a/lib/librdkafka-2.3.0/tests/0029-assign_offset.c b/lib/librdkafka-2.4.0/tests/0029-assign_offset.c similarity index 97% rename from lib/librdkafka-2.3.0/tests/0029-assign_offset.c rename to lib/librdkafka-2.4.0/tests/0029-assign_offset.c index 29ec6d9ea8c..1d1edd114f6 100644 --- a/lib/librdkafka-2.3.0/tests/0029-assign_offset.c +++ b/lib/librdkafka-2.4.0/tests/0029-assign_offset.c @@ -111,6 +111,12 @@ int main_0029_assign_offset(int argc, char **argv) { test_timing_t t_simple, t_hl; test_msgver_t mv; + if (!test_consumer_group_protocol_generic()) { + /* FIXME: this should be fixed once upgrading from the generic to the + * new consumer group protocol becomes possible. See KAFKA-15989 */ + return 0; + } + test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000)); /* Produce X messages to Y partitions so we get a diff --git a/lib/librdkafka-2.3.0/tests/0030-offset_commit.c b/lib/librdkafka-2.4.0/tests/0030-offset_commit.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0030-offset_commit.c rename to lib/librdkafka-2.4.0/tests/0030-offset_commit.c diff --git a/lib/librdkafka-2.3.0/tests/0031-get_offsets.c b/lib/librdkafka-2.4.0/tests/0031-get_offsets.c similarity index 96% rename from lib/librdkafka-2.3.0/tests/0031-get_offsets.c rename to lib/librdkafka-2.4.0/tests/0031-get_offsets.c index 573e36b10fe..569e377d3ef 100644 --- a/lib/librdkafka-2.3.0/tests/0031-get_offsets.c +++ b/lib/librdkafka-2.4.0/tests/0031-get_offsets.c @@ -50,10 +50,7 @@ void test_query_watermark_offsets_timeout(void) { const char *bootstraps; const int timeout_ms = 1000; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return; - } + TEST_SKIP_MOCK_CLUSTER(); SUB_TEST_QUICK(); @@ -97,10 +94,7 @@ void test_query_watermark_offsets_leader_change(void) { const char *bootstraps; const int timeout_ms = 1000; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return; - } + TEST_SKIP_MOCK_CLUSTER(); SUB_TEST_QUICK(); diff --git a/lib/librdkafka-2.3.0/tests/0033-regex_subscribe.c b/lib/librdkafka-2.4.0/tests/0033-regex_subscribe.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0033-regex_subscribe.c rename to lib/librdkafka-2.4.0/tests/0033-regex_subscribe.c index be974d0628d..0919f705191 100644 --- a/lib/librdkafka-2.3.0/tests/0033-regex_subscribe.c +++ b/lib/librdkafka-2.4.0/tests/0033-regex_subscribe.c @@ -434,8 +434,11 @@ static int do_test(const char *assignor) { int main_0033_regex_subscribe(int argc, char **argv) { - do_test("range"); - do_test("roundrobin"); + if (test_consumer_group_protocol_generic()) { + /* FIXME: enable once regexes are supported by KIP-848 */ + do_test("range"); + do_test("roundrobin"); + } return 0; } diff --git a/lib/librdkafka-2.3.0/tests/0034-offset_reset.c b/lib/librdkafka-2.4.0/tests/0034-offset_reset.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0034-offset_reset.c rename to lib/librdkafka-2.4.0/tests/0034-offset_reset.c diff --git a/lib/librdkafka-2.3.0/tests/0035-api_version.c b/lib/librdkafka-2.4.0/tests/0035-api_version.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0035-api_version.c rename to lib/librdkafka-2.4.0/tests/0035-api_version.c diff --git a/lib/librdkafka-2.3.0/tests/0036-partial_fetch.c b/lib/librdkafka-2.4.0/tests/0036-partial_fetch.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0036-partial_fetch.c rename to lib/librdkafka-2.4.0/tests/0036-partial_fetch.c diff --git 
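The TEST_SKIP_MOCK_CLUSTER() macro introduced in 0031 above (and reused by several later files in this patch) replaces the hand-rolled test_needs_auth() checks. Judging purely from the code it replaces, its expansion is presumably along these lines; the authoritative definition lives in tests/test.h, which is not shown in this hunk:

```c
/* Reconstructed from the removed code above; not the literal definition.
 * Used as TEST_SKIP_MOCK_CLUSTER(); in void test functions and as
 * TEST_SKIP_MOCK_CLUSTER(0); in int-returning main_XXXX() entry points. */
#define TEST_SKIP_MOCK_CLUSTER(ret)                                        \
        if (test_needs_auth()) {                                           \
                TEST_SKIP("Mock cluster does not support SSL/SASL\n");     \
                return ret;                                                \
        }
```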
a/lib/librdkafka-2.3.0/tests/0037-destroy_hang_local.c b/lib/librdkafka-2.4.0/tests/0037-destroy_hang_local.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0037-destroy_hang_local.c rename to lib/librdkafka-2.4.0/tests/0037-destroy_hang_local.c diff --git a/lib/librdkafka-2.3.0/tests/0038-performance.c b/lib/librdkafka-2.4.0/tests/0038-performance.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0038-performance.c rename to lib/librdkafka-2.4.0/tests/0038-performance.c diff --git a/lib/librdkafka-2.3.0/tests/0039-event.c b/lib/librdkafka-2.4.0/tests/0039-event.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0039-event.c rename to lib/librdkafka-2.4.0/tests/0039-event.c diff --git a/lib/librdkafka-2.3.0/tests/0040-io_event.c b/lib/librdkafka-2.4.0/tests/0040-io_event.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0040-io_event.c rename to lib/librdkafka-2.4.0/tests/0040-io_event.c diff --git a/lib/librdkafka-2.3.0/tests/0041-fetch_max_bytes.c b/lib/librdkafka-2.4.0/tests/0041-fetch_max_bytes.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0041-fetch_max_bytes.c rename to lib/librdkafka-2.4.0/tests/0041-fetch_max_bytes.c diff --git a/lib/librdkafka-2.3.0/tests/0042-many_topics.c b/lib/librdkafka-2.4.0/tests/0042-many_topics.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0042-many_topics.c rename to lib/librdkafka-2.4.0/tests/0042-many_topics.c diff --git a/lib/librdkafka-2.3.0/tests/0043-no_connection.c b/lib/librdkafka-2.4.0/tests/0043-no_connection.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0043-no_connection.c rename to lib/librdkafka-2.4.0/tests/0043-no_connection.c diff --git a/lib/librdkafka-2.3.0/tests/0044-partition_cnt.c b/lib/librdkafka-2.4.0/tests/0044-partition_cnt.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0044-partition_cnt.c rename to lib/librdkafka-2.4.0/tests/0044-partition_cnt.c diff --git a/lib/librdkafka-2.3.0/tests/0045-subscribe_update.c b/lib/librdkafka-2.4.0/tests/0045-subscribe_update.c similarity index 99% rename from lib/librdkafka-2.3.0/tests/0045-subscribe_update.c rename to lib/librdkafka-2.4.0/tests/0045-subscribe_update.c index cf013c5bdaf..c4daa4780f0 100644 --- a/lib/librdkafka-2.3.0/tests/0045-subscribe_update.c +++ b/lib/librdkafka-2.4.0/tests/0045-subscribe_update.c @@ -718,10 +718,7 @@ int main_0045_subscribe_update_racks_mock(int argc, char **argv) { int use_replica_rack = 0; int use_client_rack = 0; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); for (use_replica_rack = 0; use_replica_rack < 2; use_replica_rack++) { for (use_client_rack = 0; use_client_rack < 2; diff --git a/lib/librdkafka-2.3.0/tests/0046-rkt_cache.c b/lib/librdkafka-2.4.0/tests/0046-rkt_cache.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0046-rkt_cache.c rename to lib/librdkafka-2.4.0/tests/0046-rkt_cache.c diff --git a/lib/librdkafka-2.3.0/tests/0047-partial_buf_tmout.c b/lib/librdkafka-2.4.0/tests/0047-partial_buf_tmout.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0047-partial_buf_tmout.c rename to lib/librdkafka-2.4.0/tests/0047-partial_buf_tmout.c diff --git a/lib/librdkafka-2.3.0/tests/0048-partitioner.c b/lib/librdkafka-2.4.0/tests/0048-partitioner.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0048-partitioner.c rename to lib/librdkafka-2.4.0/tests/0048-partitioner.c diff --git 
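main_0045_subscribe_update_racks_mock above iterates over all four combinations of use_replica_rack and use_client_rack. On the client side, rack awareness is plain configuration; a short sketch (the rack id is a placeholder):

```c
char errstr[512];
rd_kafka_conf_t *conf = rd_kafka_conf_new();

/* Tag this consumer with its rack so the broker can serve fetches
 * from a follower in the same rack (KIP-392). */
if (rd_kafka_conf_set(conf, "client.rack", "rack-a",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
        fprintf(stderr, "%s\n", errstr);
```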
a/lib/librdkafka-2.3.0/tests/0049-consume_conn_close.c b/lib/librdkafka-2.4.0/tests/0049-consume_conn_close.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0049-consume_conn_close.c rename to lib/librdkafka-2.4.0/tests/0049-consume_conn_close.c diff --git a/lib/librdkafka-2.3.0/tests/0050-subscribe_adds.c b/lib/librdkafka-2.4.0/tests/0050-subscribe_adds.c similarity index 88% rename from lib/librdkafka-2.3.0/tests/0050-subscribe_adds.c rename to lib/librdkafka-2.4.0/tests/0050-subscribe_adds.c index 299c6b95d85..acde518e475 100644 --- a/lib/librdkafka-2.3.0/tests/0050-subscribe_adds.c +++ b/lib/librdkafka-2.4.0/tests/0050-subscribe_adds.c @@ -2,6 +2,7 @@ * librdkafka - Apache Kafka C library * * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,6 +29,7 @@ #include "test.h" #include "rdkafka.h" +#include "../src/rdkafka_proto.h" #include @@ -41,9 +43,13 @@ * * Verify that all messages from all three topics are consumed * * Subscribe to T1,T3 * * Verify that there were no duplicate messages. + * + * @param partition_assignment_strategy Assignment strategy to test. */ +static void +test_no_duplicate_messages(const char *partition_assignment_strategy) { -int main_0050_subscribe_adds(int argc, char **argv) { + SUB_TEST("%s", partition_assignment_strategy); rd_kafka_t *rk; #define TOPIC_CNT 3 char *topic[TOPIC_CNT] = { @@ -80,6 +86,8 @@ int main_0050_subscribe_adds(int argc, char **argv) { test_conf_init(&conf, &tconf, 60); test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_conf_set(conf, "partition.assignment.strategy", + partition_assignment_strategy); rk = test_create_consumer(topic[0], NULL, conf, tconf); @@ -105,7 +113,7 @@ int main_0050_subscribe_adds(int argc, char **argv) { err = rd_kafka_subscribe(rk, tlist); TEST_ASSERT(!err, "subscribe() failed: %s", rd_kafka_err2str(err)); - test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5)); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(3000)); test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, @@ -120,5 +128,17 @@ int main_0050_subscribe_adds(int argc, char **argv) { for (i = 0; i < TOPIC_CNT; i++) rd_free(topic[i]); + SUB_TEST_PASS(); +#undef TOPIC_CNT +} + +int main_0050_subscribe_adds(int argc, char **argv) { + + test_no_duplicate_messages("range"); + + test_no_duplicate_messages("roundrobin"); + + test_no_duplicate_messages("cooperative-sticky"); + return 0; } diff --git a/lib/librdkafka-2.3.0/tests/0051-assign_adds.c b/lib/librdkafka-2.4.0/tests/0051-assign_adds.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0051-assign_adds.c rename to lib/librdkafka-2.4.0/tests/0051-assign_adds.c diff --git a/lib/librdkafka-2.3.0/tests/0052-msg_timestamps.c b/lib/librdkafka-2.4.0/tests/0052-msg_timestamps.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0052-msg_timestamps.c rename to lib/librdkafka-2.4.0/tests/0052-msg_timestamps.c diff --git a/lib/librdkafka-2.3.0/tests/0053-stats_cb.cpp b/lib/librdkafka-2.4.0/tests/0053-stats_cb.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0053-stats_cb.cpp rename to lib/librdkafka-2.4.0/tests/0053-stats_cb.cpp diff --git a/lib/librdkafka-2.3.0/tests/0054-offset_time.cpp b/lib/librdkafka-2.4.0/tests/0054-offset_time.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0054-offset_time.cpp rename to lib/librdkafka-2.4.0/tests/0054-offset_time.cpp diff --git 
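The rewritten 0050 above runs the same duplicate-message check once per assignor by pinning partition.assignment.strategy before the consumer is created, as test_no_duplicate_messages() does through the test harness. With the plain C API, the equivalent setup is roughly (group id is an example value):

```c
char errstr[512];
rd_kafka_conf_t *conf = rd_kafka_conf_new();

/* Values exercised by the test: "range", "roundrobin",
 * "cooperative-sticky". */
rd_kafka_conf_set(conf, "group.id", "0050-group", errstr, sizeof(errstr));
rd_kafka_conf_set(conf, "partition.assignment.strategy",
                  "cooperative-sticky", errstr, sizeof(errstr));
```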
a/lib/librdkafka-2.3.0/tests/0055-producer_latency.c b/lib/librdkafka-2.4.0/tests/0055-producer_latency.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0055-producer_latency.c rename to lib/librdkafka-2.4.0/tests/0055-producer_latency.c diff --git a/lib/librdkafka-2.3.0/tests/0056-balanced_group_mt.c b/lib/librdkafka-2.4.0/tests/0056-balanced_group_mt.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0056-balanced_group_mt.c rename to lib/librdkafka-2.4.0/tests/0056-balanced_group_mt.c diff --git a/lib/librdkafka-2.3.0/tests/0057-invalid_topic.cpp b/lib/librdkafka-2.4.0/tests/0057-invalid_topic.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0057-invalid_topic.cpp rename to lib/librdkafka-2.4.0/tests/0057-invalid_topic.cpp diff --git a/lib/librdkafka-2.3.0/tests/0058-log.cpp b/lib/librdkafka-2.4.0/tests/0058-log.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0058-log.cpp rename to lib/librdkafka-2.4.0/tests/0058-log.cpp diff --git a/lib/librdkafka-2.3.0/tests/0059-bsearch.cpp b/lib/librdkafka-2.4.0/tests/0059-bsearch.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0059-bsearch.cpp rename to lib/librdkafka-2.4.0/tests/0059-bsearch.cpp diff --git a/lib/librdkafka-2.3.0/tests/0060-op_prio.cpp b/lib/librdkafka-2.4.0/tests/0060-op_prio.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0060-op_prio.cpp rename to lib/librdkafka-2.4.0/tests/0060-op_prio.cpp diff --git a/lib/librdkafka-2.3.0/tests/0061-consumer_lag.cpp b/lib/librdkafka-2.4.0/tests/0061-consumer_lag.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0061-consumer_lag.cpp rename to lib/librdkafka-2.4.0/tests/0061-consumer_lag.cpp diff --git a/lib/librdkafka-2.3.0/tests/0062-stats_event.c b/lib/librdkafka-2.4.0/tests/0062-stats_event.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0062-stats_event.c rename to lib/librdkafka-2.4.0/tests/0062-stats_event.c diff --git a/lib/librdkafka-2.3.0/tests/0063-clusterid.cpp b/lib/librdkafka-2.4.0/tests/0063-clusterid.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0063-clusterid.cpp rename to lib/librdkafka-2.4.0/tests/0063-clusterid.cpp diff --git a/lib/librdkafka-2.3.0/tests/0064-interceptors.c b/lib/librdkafka-2.4.0/tests/0064-interceptors.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0064-interceptors.c rename to lib/librdkafka-2.4.0/tests/0064-interceptors.c diff --git a/lib/librdkafka-2.3.0/tests/0065-yield.cpp b/lib/librdkafka-2.4.0/tests/0065-yield.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0065-yield.cpp rename to lib/librdkafka-2.4.0/tests/0065-yield.cpp diff --git a/lib/librdkafka-2.3.0/tests/0066-plugins.cpp b/lib/librdkafka-2.4.0/tests/0066-plugins.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0066-plugins.cpp rename to lib/librdkafka-2.4.0/tests/0066-plugins.cpp diff --git a/lib/librdkafka-2.3.0/tests/0067-empty_topic.cpp b/lib/librdkafka-2.4.0/tests/0067-empty_topic.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0067-empty_topic.cpp rename to lib/librdkafka-2.4.0/tests/0067-empty_topic.cpp diff --git a/lib/librdkafka-2.3.0/tests/0068-produce_timeout.c b/lib/librdkafka-2.4.0/tests/0068-produce_timeout.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0068-produce_timeout.c rename to lib/librdkafka-2.4.0/tests/0068-produce_timeout.c diff --git a/lib/librdkafka-2.3.0/tests/0069-consumer_add_parts.c b/lib/librdkafka-2.4.0/tests/0069-consumer_add_parts.c 
similarity index 100% rename from lib/librdkafka-2.3.0/tests/0069-consumer_add_parts.c rename to lib/librdkafka-2.4.0/tests/0069-consumer_add_parts.c diff --git a/lib/librdkafka-2.3.0/tests/0070-null_empty.cpp b/lib/librdkafka-2.4.0/tests/0070-null_empty.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0070-null_empty.cpp rename to lib/librdkafka-2.4.0/tests/0070-null_empty.cpp diff --git a/lib/librdkafka-2.3.0/tests/0072-headers_ut.c b/lib/librdkafka-2.4.0/tests/0072-headers_ut.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0072-headers_ut.c rename to lib/librdkafka-2.4.0/tests/0072-headers_ut.c diff --git a/lib/librdkafka-2.3.0/tests/0073-headers.c b/lib/librdkafka-2.4.0/tests/0073-headers.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0073-headers.c rename to lib/librdkafka-2.4.0/tests/0073-headers.c diff --git a/lib/librdkafka-2.3.0/tests/0074-producev.c b/lib/librdkafka-2.4.0/tests/0074-producev.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0074-producev.c rename to lib/librdkafka-2.4.0/tests/0074-producev.c diff --git a/lib/librdkafka-2.3.0/tests/0075-retry.c b/lib/librdkafka-2.4.0/tests/0075-retry.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0075-retry.c rename to lib/librdkafka-2.4.0/tests/0075-retry.c diff --git a/lib/librdkafka-2.3.0/tests/0076-produce_retry.c b/lib/librdkafka-2.4.0/tests/0076-produce_retry.c similarity index 78% rename from lib/librdkafka-2.3.0/tests/0076-produce_retry.c rename to lib/librdkafka-2.4.0/tests/0076-produce_retry.c index 86cc0bfb355..2ea9dfa4fd2 100644 --- a/lib/librdkafka-2.3.0/tests/0076-produce_retry.c +++ b/lib/librdkafka-2.4.0/tests/0076-produce_retry.c @@ -28,6 +28,7 @@ #include "test.h" #include "rdkafka.h" +#include "../src/rdkafka_proto.h" #include #include @@ -320,6 +321,89 @@ static void do_test_produce_retries_disconnect(const char *topic, idempotence, try_fail, should_fail); } +/** + * TODO: replace with rd_kafka_mock_request_destroy_array when merged + */ +static void free_mock_requests(rd_kafka_mock_request_t **requests, + size_t request_cnt) { + size_t i; + for (i = 0; i < request_cnt; i++) + rd_kafka_mock_request_destroy(requests[i]); + rd_free(requests); +} + +/** + * @brief Wait until at least \p num produce requests + * have been received by the mock cluster, + * plus an extra \p confidence_interval_ms. + * + * @return Number of produce requests received. + */ +static int wait_produce_requests_done(rd_kafka_mock_cluster_t *mcluster, + int num, + int confidence_interval_ms) { + size_t i; + rd_kafka_mock_request_t **requests; + size_t request_cnt; + int matching_requests = 0; + rd_bool_t last_time = rd_true; + + while (matching_requests < num || last_time) { + if (matching_requests >= num) { + rd_usleep(confidence_interval_ms * 1000, 0); + last_time = rd_false; + } + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + matching_requests = 0; + for (i = 0; i < request_cnt; i++) { + if (rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_Produce) + matching_requests++; + } + free_mock_requests(requests, request_cnt); + rd_usleep(100 * 1000, 0); + } + return matching_requests; +} + +/** + * @brief Producer should retry produce requests after receiving + * INVALID_MSG from the broker.
+ */ +static void do_test_produce_retry_invalid_msg(rd_kafka_mock_cluster_t *mcluster, + const char *bootstraps) { + rd_kafka_t *producer; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + int produce_request_cnt; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + + SUB_TEST_QUICK(); + + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(producer, topic, NULL); + + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_INVALID_MSG); + test_produce_msgs(producer, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 1, + "hello", 6); + produce_request_cnt = wait_produce_requests_done(mcluster, 2, 100); + TEST_ASSERT(produce_request_cnt == 2, + "Expected 2 produce requests, got %d\n", + produce_request_cnt); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(producer); + rd_kafka_mock_stop_request_tracking(mcluster); + SUB_TEST_PASS(); +} int main_0076_produce_retry(int argc, char **argv) { const char *topic = test_mk_topic_name("0076_produce_retry", 1); @@ -348,3 +432,19 @@ int main_0076_produce_retry(int argc, char **argv) { return 0; } + +int main_0076_produce_retry_mock(int argc, char **argv) { + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + + if (test_needs_auth()) { + TEST_SKIP("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + mcluster = test_mock_cluster_new(1, &bootstraps); + do_test_produce_retry_invalid_msg(mcluster, bootstraps); + test_mock_cluster_destroy(mcluster); + + return 0; +} diff --git a/lib/librdkafka-2.3.0/tests/0077-compaction.c b/lib/librdkafka-2.4.0/tests/0077-compaction.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0077-compaction.c rename to lib/librdkafka-2.4.0/tests/0077-compaction.c diff --git a/lib/librdkafka-2.3.0/tests/0078-c_from_cpp.cpp b/lib/librdkafka-2.4.0/tests/0078-c_from_cpp.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0078-c_from_cpp.cpp rename to lib/librdkafka-2.4.0/tests/0078-c_from_cpp.cpp diff --git a/lib/librdkafka-2.3.0/tests/0079-fork.c b/lib/librdkafka-2.4.0/tests/0079-fork.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0079-fork.c rename to lib/librdkafka-2.4.0/tests/0079-fork.c diff --git a/lib/librdkafka-2.3.0/tests/0080-admin_ut.c b/lib/librdkafka-2.4.0/tests/0080-admin_ut.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0080-admin_ut.c rename to lib/librdkafka-2.4.0/tests/0080-admin_ut.c diff --git a/lib/librdkafka-2.3.0/tests/0081-admin.c b/lib/librdkafka-2.4.0/tests/0081-admin.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0081-admin.c rename to lib/librdkafka-2.4.0/tests/0081-admin.c diff --git a/lib/librdkafka-2.3.0/tests/0082-fetch_max_bytes.cpp b/lib/librdkafka-2.4.0/tests/0082-fetch_max_bytes.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0082-fetch_max_bytes.cpp rename to lib/librdkafka-2.4.0/tests/0082-fetch_max_bytes.cpp diff --git a/lib/librdkafka-2.3.0/tests/0083-cb_event.c b/lib/librdkafka-2.4.0/tests/0083-cb_event.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0083-cb_event.c rename to lib/librdkafka-2.4.0/tests/0083-cb_event.c diff --git a/lib/librdkafka-2.3.0/tests/0084-destroy_flags.c b/lib/librdkafka-2.4.0/tests/0084-destroy_flags.c similarity index 100% rename 
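The retry check above is built on the mock cluster's request journal: rd_kafka_mock_start_request_tracking() records incoming requests, rd_kafka_mock_push_request_errors() injects a single INVALID_MSG on the next Produce, and wait_produce_requests_done() then counts Produce requests until it has seen the original plus the retry. The counting step reduces to a helper like the following, using only calls that appear in the hunk above (the include path is the usual public location of the mock API; ApiKey 0 is Produce, spelled RD_KAFKAP_Produce in rdkafka_proto.h):

```c
#include <librdkafka/rdkafka_mock.h>

/* Count Produce requests seen so far by a tracking mock cluster,
 * releasing the snapshot as we go (same pattern as free_mock_requests()
 * in the diff above). */
static int count_produce_requests(rd_kafka_mock_cluster_t *mcluster) {
        size_t cnt, i;
        int produces = 0;
        rd_kafka_mock_request_t **reqs =
            rd_kafka_mock_get_requests(mcluster, &cnt);

        for (i = 0; i < cnt; i++) {
                if (rd_kafka_mock_request_api_key(reqs[i]) == 0 /*Produce*/)
                        produces++;
                rd_kafka_mock_request_destroy(reqs[i]);
        }
        rd_free(reqs);
        return produces;
}
```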
from lib/librdkafka-2.3.0/tests/0084-destroy_flags.c rename to lib/librdkafka-2.4.0/tests/0084-destroy_flags.c diff --git a/lib/librdkafka-2.3.0/tests/0085-headers.cpp b/lib/librdkafka-2.4.0/tests/0085-headers.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0085-headers.cpp rename to lib/librdkafka-2.4.0/tests/0085-headers.cpp diff --git a/lib/librdkafka-2.3.0/tests/0086-purge.c b/lib/librdkafka-2.4.0/tests/0086-purge.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0086-purge.c rename to lib/librdkafka-2.4.0/tests/0086-purge.c diff --git a/lib/librdkafka-2.3.0/tests/0088-produce_metadata_timeout.c b/lib/librdkafka-2.4.0/tests/0088-produce_metadata_timeout.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0088-produce_metadata_timeout.c rename to lib/librdkafka-2.4.0/tests/0088-produce_metadata_timeout.c diff --git a/lib/librdkafka-2.3.0/tests/0089-max_poll_interval.c b/lib/librdkafka-2.4.0/tests/0089-max_poll_interval.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0089-max_poll_interval.c rename to lib/librdkafka-2.4.0/tests/0089-max_poll_interval.c diff --git a/lib/librdkafka-2.3.0/tests/0090-idempotence.c b/lib/librdkafka-2.4.0/tests/0090-idempotence.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0090-idempotence.c rename to lib/librdkafka-2.4.0/tests/0090-idempotence.c diff --git a/lib/librdkafka-2.3.0/tests/0091-max_poll_interval_timeout.c b/lib/librdkafka-2.4.0/tests/0091-max_poll_interval_timeout.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0091-max_poll_interval_timeout.c rename to lib/librdkafka-2.4.0/tests/0091-max_poll_interval_timeout.c diff --git a/lib/librdkafka-2.3.0/tests/0092-mixed_msgver.c b/lib/librdkafka-2.4.0/tests/0092-mixed_msgver.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0092-mixed_msgver.c rename to lib/librdkafka-2.4.0/tests/0092-mixed_msgver.c diff --git a/lib/librdkafka-2.3.0/tests/0093-holb.c b/lib/librdkafka-2.4.0/tests/0093-holb.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0093-holb.c rename to lib/librdkafka-2.4.0/tests/0093-holb.c diff --git a/lib/librdkafka-2.3.0/tests/0094-idempotence_msg_timeout.c b/lib/librdkafka-2.4.0/tests/0094-idempotence_msg_timeout.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0094-idempotence_msg_timeout.c rename to lib/librdkafka-2.4.0/tests/0094-idempotence_msg_timeout.c diff --git a/lib/librdkafka-2.3.0/tests/0095-all_brokers_down.cpp b/lib/librdkafka-2.4.0/tests/0095-all_brokers_down.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0095-all_brokers_down.cpp rename to lib/librdkafka-2.4.0/tests/0095-all_brokers_down.cpp diff --git a/lib/librdkafka-2.3.0/tests/0097-ssl_verify.cpp b/lib/librdkafka-2.4.0/tests/0097-ssl_verify.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0097-ssl_verify.cpp rename to lib/librdkafka-2.4.0/tests/0097-ssl_verify.cpp diff --git a/lib/librdkafka-2.3.0/tests/0098-consumer-txn.cpp b/lib/librdkafka-2.4.0/tests/0098-consumer-txn.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0098-consumer-txn.cpp rename to lib/librdkafka-2.4.0/tests/0098-consumer-txn.cpp diff --git a/lib/librdkafka-2.3.0/tests/0099-commit_metadata.c b/lib/librdkafka-2.4.0/tests/0099-commit_metadata.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0099-commit_metadata.c rename to lib/librdkafka-2.4.0/tests/0099-commit_metadata.c diff --git a/lib/librdkafka-2.3.0/tests/0100-thread_interceptors.cpp 
b/lib/librdkafka-2.4.0/tests/0100-thread_interceptors.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0100-thread_interceptors.cpp rename to lib/librdkafka-2.4.0/tests/0100-thread_interceptors.cpp diff --git a/lib/librdkafka-2.3.0/tests/0101-fetch-from-follower.cpp b/lib/librdkafka-2.4.0/tests/0101-fetch-from-follower.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0101-fetch-from-follower.cpp rename to lib/librdkafka-2.4.0/tests/0101-fetch-from-follower.cpp diff --git a/lib/librdkafka-2.3.0/tests/0102-static_group_rebalance.c b/lib/librdkafka-2.4.0/tests/0102-static_group_rebalance.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0102-static_group_rebalance.c rename to lib/librdkafka-2.4.0/tests/0102-static_group_rebalance.c diff --git a/lib/librdkafka-2.3.0/tests/0103-transactions.c b/lib/librdkafka-2.4.0/tests/0103-transactions.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0103-transactions.c rename to lib/librdkafka-2.4.0/tests/0103-transactions.c diff --git a/lib/librdkafka-2.3.0/tests/0104-fetch_from_follower_mock.c b/lib/librdkafka-2.4.0/tests/0104-fetch_from_follower_mock.c similarity index 99% rename from lib/librdkafka-2.3.0/tests/0104-fetch_from_follower_mock.c rename to lib/librdkafka-2.4.0/tests/0104-fetch_from_follower_mock.c index 5863638da3b..972ff9c5185 100644 --- a/lib/librdkafka-2.3.0/tests/0104-fetch_from_follower_mock.c +++ b/lib/librdkafka-2.4.0/tests/0104-fetch_from_follower_mock.c @@ -591,10 +591,7 @@ static void do_test_seek_to_offset_with_previous_epoch(void) { int main_0104_fetch_from_follower_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); test_timeout_set(50); diff --git a/lib/librdkafka-2.3.0/tests/0105-transactions_mock.c b/lib/librdkafka-2.4.0/tests/0105-transactions_mock.c similarity index 99% rename from lib/librdkafka-2.3.0/tests/0105-transactions_mock.c rename to lib/librdkafka-2.4.0/tests/0105-transactions_mock.c index 8d6173c7f09..04958f7d2a0 100644 --- a/lib/librdkafka-2.3.0/tests/0105-transactions_mock.c +++ b/lib/librdkafka-2.4.0/tests/0105-transactions_mock.c @@ -3822,10 +3822,7 @@ do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_bool_t times_out) { int main_0105_transactions_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); do_test_txn_recoverable_errors(); diff --git a/lib/librdkafka-2.3.0/tests/0106-cgrp_sess_timeout.c b/lib/librdkafka-2.4.0/tests/0106-cgrp_sess_timeout.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0106-cgrp_sess_timeout.c rename to lib/librdkafka-2.4.0/tests/0106-cgrp_sess_timeout.c index ca0a08c20a3..6d9f43f1603 100644 --- a/lib/librdkafka-2.3.0/tests/0106-cgrp_sess_timeout.c +++ b/lib/librdkafka-2.4.0/tests/0106-cgrp_sess_timeout.c @@ -285,10 +285,7 @@ static void do_test_commit_on_lost(void) { int main_0106_cgrp_sess_timeout(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); do_test_session_timeout("sync"); do_test_session_timeout("async"); diff --git a/lib/librdkafka-2.3.0/tests/0107-topic_recreate.c b/lib/librdkafka-2.4.0/tests/0107-topic_recreate.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0107-topic_recreate.c rename to lib/librdkafka-2.4.0/tests/0107-topic_recreate.c diff --git 
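The *_mock suites in 0104, 0105 and 0106 above now share the TEST_SKIP_MOCK_CLUSTER(0) guard instead of copy-pasted auth checks. Outside the test harness, the in-process mock cluster these tests run against can also be brought up from configuration alone; a sketch (the broker count is arbitrary):

```c
char errstr[512];
rd_kafka_conf_t *conf = rd_kafka_conf_new();

/* Spin up an in-process mock cluster with 3 brokers; no real
 * Kafka deployment is contacted. */
rd_kafka_conf_set(conf, "test.mock.num.brokers", "3",
                  errstr, sizeof(errstr));
rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                              errstr, sizeof(errstr));
```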
a/lib/librdkafka-2.3.0/tests/0109-auto_create_topics.cpp b/lib/librdkafka-2.4.0/tests/0109-auto_create_topics.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0109-auto_create_topics.cpp rename to lib/librdkafka-2.4.0/tests/0109-auto_create_topics.cpp diff --git a/lib/librdkafka-2.3.0/tests/0110-batch_size.cpp b/lib/librdkafka-2.4.0/tests/0110-batch_size.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0110-batch_size.cpp rename to lib/librdkafka-2.4.0/tests/0110-batch_size.cpp diff --git a/lib/librdkafka-2.3.0/tests/0111-delay_create_topics.cpp b/lib/librdkafka-2.4.0/tests/0111-delay_create_topics.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0111-delay_create_topics.cpp rename to lib/librdkafka-2.4.0/tests/0111-delay_create_topics.cpp diff --git a/lib/librdkafka-2.3.0/tests/0112-assign_unknown_part.c b/lib/librdkafka-2.4.0/tests/0112-assign_unknown_part.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0112-assign_unknown_part.c rename to lib/librdkafka-2.4.0/tests/0112-assign_unknown_part.c diff --git a/lib/librdkafka-2.3.0/tests/0113-cooperative_rebalance.cpp b/lib/librdkafka-2.4.0/tests/0113-cooperative_rebalance.cpp similarity index 87% rename from lib/librdkafka-2.3.0/tests/0113-cooperative_rebalance.cpp rename to lib/librdkafka-2.4.0/tests/0113-cooperative_rebalance.cpp index c54619d714a..e94b1b78535 100644 --- a/lib/librdkafka-2.3.0/tests/0113-cooperative_rebalance.cpp +++ b/lib/librdkafka-2.4.0/tests/0113-cooperative_rebalance.cpp @@ -150,6 +150,11 @@ static RdKafka::KafkaConsumer *make_consumer( Test::conf_set(conf, "auto.offset.reset", "earliest"); Test::conf_set(conf, "enable.auto.commit", "false"); Test::conf_set(conf, "partition.assignment.strategy", assignment_strategy); + + if (test_consumer_group_protocol()) { + Test::conf_set(conf, "group.protocol", test_consumer_group_protocol()); + } + if (additional_conf != NULL) { for (itr = (*additional_conf).begin(); itr != (*additional_conf).end(); itr++) @@ -284,9 +289,12 @@ class DefaultRebalanceCb : public RdKafka::RebalanceCb { std::string protocol = consumer->rebalance_protocol(); - TEST_ASSERT(protocol == "COOPERATIVE", - "%s: Expected rebalance_protocol \"COOPERATIVE\", not %s", - consumer->name().c_str(), protocol.c_str()); + if (protocol != "") { + /* Consumer hasn't been closed */ + TEST_ASSERT(protocol == "COOPERATIVE", + "%s: Expected rebalance_protocol \"COOPERATIVE\", not %s", + consumer->name().c_str(), protocol.c_str()); + } const char *lost_str = consumer->assignment_lost() ? 
" (LOST)" : ""; Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << ": " @@ -649,6 +657,9 @@ static void a_assign_tests() { std::string topic2_str = Test::mk_topic_name("0113-a2", 1); test_create_topic(NULL, topic2_str.c_str(), 1, 1); + test_wait_topic_exists(NULL, topic1_str.c_str(), 10 * 1000); + test_wait_topic_exists(NULL, topic2_str.c_str(), 10 * 1000); + test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1); test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2); @@ -728,6 +739,9 @@ static void a_assign_rapid() { Test::conf_set(conf, "group.id", group_id); Test::conf_set(conf, "auto.offset.reset", "earliest"); Test::conf_set(conf, "enable.auto.commit", "false"); + if (test_consumer_group_protocol()) { + Test::conf_set(conf, "group.protocol", test_consumer_group_protocol()); + } RdKafka::KafkaConsumer *consumer; consumer = RdKafka::KafkaConsumer::create(conf, errstr); @@ -883,7 +897,9 @@ static void a_assign_rapid() { */ static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { - SUB_TEST(); + SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer"); + int expected_cb1_assign_call_cnt = 3; + int expected_cb2_assign_call_cnt = 2; std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); std::string group_name = @@ -906,14 +922,18 @@ static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { Test::poll_once(c2, 500); /* Start c2 after c1 has received initial assignment */ - if (!c2_subscribed && rebalance_cb1.assign_call_cnt > 0) { + if (!c2_subscribed && rebalance_cb1.nonempty_assign_call_cnt > 0) { Test::subscribe(c2, topic_name); c2_subscribed = true; } /* Failure case: test will time out. */ - if (rebalance_cb1.assign_call_cnt == 3 && - rebalance_cb2.assign_call_cnt == 2) { + if (Test::assignment_partition_count(c1, NULL) == 1 && + Test::assignment_partition_count(c2, NULL) == 1) { + if (test_consumer_group_protocol_generic() && + !(rebalance_cb1.assign_call_cnt == expected_cb1_assign_call_cnt && + rebalance_cb2.assign_call_cnt == expected_cb2_assign_call_cnt)) + continue; break; } } @@ -921,36 +941,41 @@ static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { /* Sequence of events: * * 1. c1 joins group. - * 2. c1 gets assigned 2 partitions. + * 2. c1 gets assigned 2 partitions (+1 assign call). * - there isn't a follow-on rebalance because there aren't any revoked * partitions. * 3. c2 joins group. - * 4. This results in a rebalance with one partition being revoked from c1, - * and no partitions assigned to either c1 or c2 (however the rebalance - * callback will be called in each case with an empty set). + * 4. This results in a rebalance with one partition being revoked from c1 (+1 + * revoke call), and no partitions assigned to either c1 (+1 assign call) or + * c2 (+1 assign call) (however the rebalance callback will be called in each + * case with an empty set). * 5. c1 then re-joins the group since it had a partition revoked. - * 6. c2 is now assigned a single partition, and c1's incremental assignment - * is empty. + * 6. c2 is now assigned a single partition (+1 assign call), and c1's + * incremental assignment is empty (+1 assign call). * 7. Since there were no revoked partitions, no further rebalance is * triggered. */ - /* The rebalance cb is always called on assign, even if empty. 
*/ - if (rebalance_cb1.assign_call_cnt != 3) - Test::Fail(tostr() << "Expecting 3 assign calls on consumer 1, not " - << rebalance_cb1.assign_call_cnt); - if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail(tostr() << "Expecting 2 assign calls on consumer 2, not: " - << rebalance_cb2.assign_call_cnt); - - /* The rebalance cb is not called on and empty revoke (unless partitions lost, - * which is not the case here) */ - if (rebalance_cb1.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: " - << rebalance_cb1.revoke_call_cnt); - if (rebalance_cb2.revoke_call_cnt != 0) - Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: " - << rebalance_cb2.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + /* The rebalance cb is always called on assign, even if empty. */ + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expecting " << expected_cb1_assign_call_cnt + << " assign calls on consumer 1, not " + << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting " << expected_cb2_assign_call_cnt + << " assign calls on consumer 2, not: " + << rebalance_cb2.assign_call_cnt); + + /* The rebalance cb is not called on an empty revoke (unless partitions + * are lost, which is not the case here) */ + if (rebalance_cb1.revoke_call_cnt != 1) + Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: " + << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != 0) + Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: " + << rebalance_cb2.revoke_call_cnt); + } /* Final state */ @@ -1004,13 +1029,15 @@ static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { c1->close(); c2->close(); - /* Closing the consumer should trigger rebalance_cb (revoke): */ - if (rebalance_cb1.revoke_call_cnt != 2) - Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: " - << rebalance_cb1.revoke_call_cnt); - if (rebalance_cb2.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: " - << rebalance_cb2.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + /* Closing the consumer should trigger rebalance_cb (revoke): */ + if (rebalance_cb1.revoke_call_cnt != 2) + Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: " + << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != 1) + Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: " + << rebalance_cb2.revoke_call_cnt); + } /* ..and net assigned partitions should drop to 0 in both cases: */ if (rebalance_cb1.partitions_assigned_net != 0) @@ -1050,7 +1077,7 @@ static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { */ static void c_subscribe_no_cb_test(rd_bool_t close_consumer) { - SUB_TEST(); + SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer"); std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); std::string group_name = @@ -1107,7 +1134,7 @@ static void c_subscribe_no_cb_test(rd_bool_t close_consumer) { */ static void d_change_subscription_add_topic(rd_bool_t close_consumer) { - SUB_TEST(); + SUB_TEST("%s", close_consumer ?
"close consumer" : "don't close consumer"); std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); @@ -1163,7 +1190,7 @@ static void d_change_subscription_add_topic(rd_bool_t close_consumer) { */ static void e_change_subscription_remove_topic(rd_bool_t close_consumer) { - SUB_TEST(); + SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer"); std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); @@ -1446,9 +1473,11 @@ static void h_delete_topic() { c->assignment(partitions); if (partitions.size() == 2 && !deleted) { - if (rebalance_cb.assign_call_cnt != 1) + if (test_consumer_group_protocol_generic() && + rebalance_cb.assign_call_cnt != 1) Test::Fail(tostr() << "Expected 1 assign call, saw " << rebalance_cb.assign_call_cnt << "\n"); + Test::delete_topic(c, topic_name_2.c_str()); deleted = true; } @@ -1508,7 +1537,8 @@ static void i_delete_topic_2() { Test::poll_once(c, 500); if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) { - if (rebalance_cb.assign_call_cnt != 1) + if (test_consumer_group_protocol_generic() && + rebalance_cb.assign_call_cnt != 1) Test::Fail(tostr() << "Expected one assign call, saw " << rebalance_cb.assign_call_cnt << "\n"); Test::delete_topic(c, topic_name_1.c_str()); @@ -1614,23 +1644,27 @@ static void k_add_partition() { Test::poll_once(c, 500); if (Test::assignment_partition_count(c, NULL) == 1 && !subscribed) { - if (rebalance_cb.assign_call_cnt != 1) - Test::Fail(tostr() << "Expected 1 assign call, saw " - << rebalance_cb.assign_call_cnt); - if (rebalance_cb.revoke_call_cnt != 0) - Test::Fail(tostr() << "Expected 0 revoke calls, saw " - << rebalance_cb.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb.assign_call_cnt != 1) + Test::Fail(tostr() << "Expected 1 assign call, saw " + << rebalance_cb.assign_call_cnt); + if (rebalance_cb.revoke_call_cnt != 0) + Test::Fail(tostr() << "Expected 0 revoke calls, saw " + << rebalance_cb.revoke_call_cnt); + } Test::create_partitions(c, topic_name.c_str(), 2); subscribed = true; } if (Test::assignment_partition_count(c, NULL) == 2 && subscribed) { - if (rebalance_cb.assign_call_cnt != 2) - Test::Fail(tostr() << "Expected 2 assign calls, saw " - << rebalance_cb.assign_call_cnt); - if (rebalance_cb.revoke_call_cnt != 0) - Test::Fail(tostr() << "Expected 0 revoke calls, saw " - << rebalance_cb.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb.assign_call_cnt != 2) + Test::Fail(tostr() << "Expected 2 assign calls, saw " + << rebalance_cb.assign_call_cnt); + if (rebalance_cb.revoke_call_cnt != 0) + Test::Fail(tostr() << "Expected 0 revoke calls, saw " + << rebalance_cb.revoke_call_cnt); + } done = true; } } @@ -1639,12 +1673,14 @@ static void k_add_partition() { c->close(); delete c; - if (rebalance_cb.assign_call_cnt != 2) - Test::Fail(tostr() << "Expected 2 assign calls, saw " - << rebalance_cb.assign_call_cnt); - if (rebalance_cb.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expected 1 revoke call, saw " - << rebalance_cb.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb.assign_call_cnt != 2) + Test::Fail(tostr() << "Expected 2 assign calls, saw " + << rebalance_cb.assign_call_cnt); + if (rebalance_cb.revoke_call_cnt != 1) + Test::Fail(tostr() << "Expected 1 revoke call, saw " + << rebalance_cb.revoke_call_cnt); + } SUB_TEST_PASS(); } @@ -1682,49 +1718,57 @@ static void l_unsubscribe() { "C_2", group_name, "cooperative-sticky", NULL, 
&rebalance_cb2, 30); Test::subscribe(c2, topic_name_1, topic_name_2); - bool done = false; - bool unsubscribed = false; + bool done = false; + bool unsubscribed = false; + int expected_cb1_assign_call_cnt = 1; + int expected_cb1_revoke_call_cnt = 1; + int expected_cb2_assign_call_cnt = 1; + while (!done) { Test::poll_once(c1, 500); Test::poll_once(c2, 500); if (Test::assignment_partition_count(c1, NULL) == 2 && Test::assignment_partition_count(c2, NULL) == 2) { - if (rebalance_cb1.assign_call_cnt != 1) - Test::Fail( - tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " - << rebalance_cb1.assign_call_cnt); - if (rebalance_cb2.assign_call_cnt != 1) - Test::Fail( - tostr() << "Expecting consumer 2's assign_call_cnt to be 1 not: " - << rebalance_cb2.assign_call_cnt); + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be " + << expected_cb1_assign_call_cnt + << " not: " << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be " + << expected_cb2_assign_call_cnt + << " not: " << rebalance_cb2.assign_call_cnt); + } Test::Say("Unsubscribing consumer 1 from both topics\n"); c1->unsubscribe(); unsubscribed = true; + expected_cb2_assign_call_cnt++; } if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 && Test::assignment_partition_count(c2, NULL) == 4) { - if (rebalance_cb1.assign_call_cnt != - 1) /* is now unsubscribed, so rebalance_cb will no longer be called. - */ - Test::Fail( - tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " - << rebalance_cb1.assign_call_cnt); - if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail( - tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: " - << rebalance_cb2.assign_call_cnt); - if (rebalance_cb1.revoke_call_cnt != 1) - Test::Fail( - tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: " - << rebalance_cb1.revoke_call_cnt); - if (rebalance_cb2.revoke_call_cnt != - 0) /* the rebalance_cb should not be called if the revoked partition - list is empty */ - Test::Fail( - tostr() << "Expecting consumer 2's revoke_call_cnt to be 0 not: " - << rebalance_cb2.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + /* is now unsubscribed, so rebalance_cb will no longer be called. 
*/ + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be " + << expected_cb1_assign_call_cnt + << " not: " << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be " + << expected_cb2_assign_call_cnt + << " not: " << rebalance_cb2.assign_call_cnt); + if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be " + << expected_cb1_revoke_call_cnt + << " not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != + 0) /* the rebalance_cb should not be called if the revoked partition + list is empty */ + Test::Fail(tostr() + << "Expecting consumer 2's revoke_call_cnt to be 0 not: " + << rebalance_cb2.revoke_call_cnt); + } Test::Say("Unsubscribe completed"); done = true; } @@ -1735,21 +1779,26 @@ static void l_unsubscribe() { Test::Say("Closing consumer 2\n"); c2->close(); - /* there should be no assign rebalance_cb calls on close */ - if (rebalance_cb1.assign_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " - << rebalance_cb1.assign_call_cnt); - if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: " - << rebalance_cb2.assign_call_cnt); - - if (rebalance_cb1.revoke_call_cnt != - 1) /* should not be called a second revoke rebalance_cb */ - Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: " - << rebalance_cb1.revoke_call_cnt); - if (rebalance_cb2.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: " - << rebalance_cb2.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + /* there should be no assign rebalance_cb calls on close */ + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be " + << expected_cb1_assign_call_cnt + << " not: " << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be " + << expected_cb2_assign_call_cnt + << " not: " << rebalance_cb2.assign_call_cnt); + + if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be " + << expected_cb1_revoke_call_cnt + << " not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != 1) + Test::Fail( + tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: " + << rebalance_cb2.revoke_call_cnt); + } if (rebalance_cb1.lost_call_cnt != 0) Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: " @@ -1850,12 +1899,16 @@ static void n_wildcard() { Test::poll_once(c1, 500); Test::poll_once(c2, 500); - if (rebalance_cb1.assign_call_cnt != 0) - Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: " - << rebalance_cb1.assign_call_cnt); - if (rebalance_cb2.assign_call_cnt != 0) - Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: " - << rebalance_cb2.assign_call_cnt); + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.assign_call_cnt != 0) + Test::Fail( + tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: " + << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != 0) + Test::Fail( + tostr() << "Expecting consumer 2's 
assign_call_cnt to be 0 not: " + << rebalance_cb2.assign_call_cnt); + } bool done = false; bool created_topics = false; @@ -1921,13 +1974,16 @@ static void n_wildcard() { if (Test::assignment_partition_count(c1, NULL) == 1 && Test::assignment_partition_count(c2, NULL) == 1 && deleted_topic) { - /* accumulated in lost case as well */ - TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 1, - "Expecting C_1's revoke_call_cnt to be 1 not %d", - rebalance_cb1.revoke_call_cnt); - TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 1, - "Expecting C_2's revoke_call_cnt to be 1 not %d", - rebalance_cb2.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + /* accumulated in lost case as well */ + TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 1, + "Expecting C_1's revoke_call_cnt to be 1 not %d", + rebalance_cb1.revoke_call_cnt); + TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 1, + "Expecting C_2's revoke_call_cnt to be 1 not %d", + rebalance_cb2.revoke_call_cnt); + } + TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1, "Expecting C_1's lost_call_cnt to be 1 not %d", rebalance_cb1.lost_call_cnt); @@ -1955,10 +2011,12 @@ static void n_wildcard() { last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt; c1->close(); - /* There should be no assign rebalance_cb calls on close */ - TEST_ASSERT(rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt, - "Expecting C_1's assign_call_cnt to be %d not %d", - last_cb1_assign_call_cnt, rebalance_cb1.assign_call_cnt); + if (test_consumer_group_protocol_generic()) { + /* There should be no assign rebalance_cb calls on close */ + TEST_ASSERT(rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt, + "Expecting C_1's assign_call_cnt to be %d not %d", + last_cb1_assign_call_cnt, rebalance_cb1.assign_call_cnt); + } /* Let C_2 catch up on the rebalance and get assigned C_1's partitions. 
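Most of the count adjustments in this file exist because the generic protocol drives the rebalance callback differently than KIP-848's broker-side assignor. For orientation, the callback shape these cooperative tests exercise is the standard one from the librdkafka documentation; a C-API sketch with error handling omitted:

```c
#include <string.h>
#include <librdkafka/rdkafka.h>

static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *parts,
                         void *opaque) {
        if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
                /* Incremental: only the delta is assigned/revoked */
                if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                        rd_kafka_incremental_assign(rk, parts);
                else /* RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS */
                        rd_kafka_incremental_unassign(rk, parts);
        } else {
                /* EAGER: the whole assignment is replaced each time */
                if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                        rd_kafka_assign(rk, parts);
                else
                        rd_kafka_assign(rk, NULL);
        }
}
```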
*/ last_cb2_assign_call_cnt = rebalance_cb2.nonempty_assign_call_cnt; @@ -1969,17 +2027,19 @@ static void n_wildcard() { last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt; c2->close(); - /* There should be no assign rebalance_cb calls on close */ - TEST_ASSERT(rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt, - "Expecting C_2's assign_call_cnt to be %d not %d", - last_cb2_assign_call_cnt, rebalance_cb2.assign_call_cnt); - - TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 2, - "Expecting C_1's revoke_call_cnt to be 2 not %d", - rebalance_cb1.revoke_call_cnt); - TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 2, - "Expecting C_2's revoke_call_cnt to be 2 not %d", - rebalance_cb2.revoke_call_cnt); + if (test_consumer_group_protocol_generic()) { + /* There should be no assign rebalance_cb calls on close */ + TEST_ASSERT(rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt, + "Expecting C_2's assign_call_cnt to be %d not %d", + last_cb2_assign_call_cnt, rebalance_cb2.assign_call_cnt); + + TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 2, + "Expecting C_1's revoke_call_cnt to be 2 not %d", + rebalance_cb1.revoke_call_cnt); + TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 2, + "Expecting C_2's revoke_call_cnt to be 2 not %d", + rebalance_cb2.revoke_call_cnt); + } TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1, "Expecting C_1's lost_call_cnt to be 1, not %d", @@ -2059,7 +2119,8 @@ static void o_java_interop() { if (Test::assignment_partition_count(c, NULL) == 4 && java_pid != 0 && !changed_subscription) { - if (rebalance_cb.assign_call_cnt != 2) + if (test_consumer_group_protocol_generic() && + rebalance_cb.assign_call_cnt != 2) Test::Fail(tostr() << "Expecting consumer's assign_call_cnt to be 2, " "not " << rebalance_cb.assign_call_cnt); @@ -2200,8 +2261,14 @@ static void t_max_poll_interval_exceeded(int variation) { Test::subscribe(c1, topic_name_1); Test::subscribe(c2, topic_name_1); - bool done = false; - bool both_have_been_assigned = false; + bool done = false; + bool both_have_been_assigned = false; + int expected_cb1_assign_call_cnt = 1; + int expected_cb2_assign_call_cnt = 2; + int expected_cb1_revoke_call_cnt = 1; + int expected_cb2_revoke_call_cnt = 1; + int expected_cb1_lost_call_cnt = 1; + while (!done) { if (!both_have_been_assigned) Test::poll_once(c1, 500); @@ -2224,7 +2291,7 @@ static void t_max_poll_interval_exceeded(int variation) { } } - if (variation == 1) { + if (variation == 1 || variation == 3) { if (rebalance_cb1.lost_call_cnt != 0) Test::Fail( tostr() << "Expected consumer 1 lost revoke count to be 0, not: " << rebalance_cb1.lost_call_cnt); Test::poll_once(c1, 500); /* Eat the max poll interval exceeded error message */ Test::poll_once(c1, 500); /* Trigger the rebalance_cb with lost partitions */ - if (rebalance_cb1.lost_call_cnt != 1) - Test::Fail( - tostr() << "Expected consumer 1 lost revoke count to be 1, not: " - << rebalance_cb1.lost_call_cnt); + if (rebalance_cb1.lost_call_cnt != expected_cb1_lost_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be " + << expected_cb1_lost_call_cnt + << ", not: " << rebalance_cb1.lost_call_cnt); + } + + if (variation == 3) { + /* Last poll will cause a rejoin; wait until the rejoin happens.
*/ + rd_sleep(5); + expected_cb2_revoke_call_cnt++; } c1->close(); c2->close(); - if (rebalance_cb1.lost_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 1, not: " - << rebalance_cb1.lost_call_cnt); - - if (rebalance_cb1.assign_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 assign count to be 1, not: " - << rebalance_cb1.assign_call_cnt); - if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail(tostr() << "Expected consumer 1 assign count to be 2, not: " - << rebalance_cb1.assign_call_cnt); - - if (rebalance_cb1.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 revoke count to be 1, not: " - << rebalance_cb1.revoke_call_cnt); - if (rebalance_cb2.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 2 revoke count to be 1, not: " - << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb1.lost_call_cnt != expected_cb1_lost_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be " + << expected_cb1_lost_call_cnt + << ", not: " << rebalance_cb1.lost_call_cnt); + + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.nonempty_assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 non-empty assign count to be " + << expected_cb1_assign_call_cnt << ", not: " + << rebalance_cb1.nonempty_assign_call_cnt); + if (rebalance_cb2.nonempty_assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expected consumer 2 non-empty assign count to be " + << expected_cb2_assign_call_cnt << ", not: " + << rebalance_cb2.nonempty_assign_call_cnt); + + if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 revoke count to be " + << expected_cb1_revoke_call_cnt + << ", not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != expected_cb2_revoke_call_cnt) + Test::Fail(tostr() << "Expected consumer 2 revoke count to be " + << expected_cb2_revoke_call_cnt + << ", not: " << rebalance_cb2.revoke_call_cnt); + } delete c1; delete c2; @@ -3042,6 +3122,8 @@ static void v_commit_during_rebalance(bool with_rebalance_cb, test_create_topic(p, topic, partition_cnt, 1); + test_wait_topic_exists(p, topic, 5000); + for (i = 0; i < partition_cnt; i++) { test_produce_msgs2(p, topic, testid, i, i * msgcnt_per_partition, msgcnt_per_partition, NULL, 0); @@ -3133,22 +3215,38 @@ static void x_incremental_rebalances(void) { test_consumer_subscribe(c[1], topic); test_consumer_wait_assignment(c[1], rd_true /*poll*/); rd_sleep(3); - test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3, - topic, 4, topic, 5, NULL); - test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0, - topic, 1, topic, 2, NULL); + if (test_consumer_group_protocol_generic()) { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3, + topic, 4, topic, 5, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0, + topic, 1, topic, 2, NULL); + } else { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 0, + topic, 1, topic, 2, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 3, + topic, 4, topic, 5, NULL); + } /* Third consumer joins group */ TEST_SAY("%s: joining\n", rd_kafka_name(c[2])); test_consumer_subscribe(c[2], topic); test_consumer_wait_assignment(c[2], rd_true /*poll*/); rd_sleep(3); - test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4, - topic, 5, NULL); - 
test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1, - topic, 2, NULL); - test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 3, - topic, 0, NULL); + if (test_consumer_group_protocol_generic()) { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4, + topic, 5, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1, + topic, 2, NULL); + test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 3, + topic, 0, NULL); + } else { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 0, + topic, 1, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 3, + topic, 4, NULL); + test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 2, + topic, 5, NULL); + } /* Raise any previously failed verify_assignment calls and fail the test */ TEST_LATER_CHECK(); @@ -3163,6 +3261,8 @@ static void x_incremental_rebalances(void) { /* Local tests not needing a cluster */ int main_0113_cooperative_rebalance_local(int argc, char **argv) { + TEST_SKIP_MOCK_CLUSTER(0); + a_assign_rapid(); p_lost_partitions_heartbeat_illegal_generation_test(); q_lost_partitions_illegal_generation_test(rd_false /*joingroup*/); @@ -3198,16 +3298,23 @@ int main_0113_cooperative_rebalance(int argc, char **argv) { k_add_partition(); l_unsubscribe(); m_unsubscribe_2(); - n_wildcard(); + if (test_consumer_group_protocol_generic()) { + /* FIXME: should work with next ConsumerGroupHeartbeat version */ + n_wildcard(); + } o_java_interop(); for (i = 1; i <= 6; i++) /* iterate over 6 different test variations */ s_subscribe_when_rebalancing(i); - for (i = 1; i <= 2; i++) + for (i = 1; i <= 3; i++) t_max_poll_interval_exceeded(i); /* Run all 2*3 variations of the u_.. 
test */ for (i = 0; i < 3; i++) { - u_multiple_subscription_changes(true /*with rebalance_cb*/, i); - u_multiple_subscription_changes(false /*without rebalance_cb*/, i); + if (test_consumer_group_protocol_generic()) { + /* FIXME: check this test, it should fail because of the callback number + */ + u_multiple_subscription_changes(true /*with rebalance_cb*/, i); + u_multiple_subscription_changes(false /*without rebalance_cb*/, i); + } } v_commit_during_rebalance(true /*with rebalance callback*/, true /*auto commit*/); diff --git a/lib/librdkafka-2.3.0/tests/0114-sticky_partitioning.cpp b/lib/librdkafka-2.4.0/tests/0114-sticky_partitioning.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0114-sticky_partitioning.cpp rename to lib/librdkafka-2.4.0/tests/0114-sticky_partitioning.cpp diff --git a/lib/librdkafka-2.3.0/tests/0115-producer_auth.cpp b/lib/librdkafka-2.4.0/tests/0115-producer_auth.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0115-producer_auth.cpp rename to lib/librdkafka-2.4.0/tests/0115-producer_auth.cpp diff --git a/lib/librdkafka-2.3.0/tests/0116-kafkaconsumer_close.cpp b/lib/librdkafka-2.4.0/tests/0116-kafkaconsumer_close.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0116-kafkaconsumer_close.cpp rename to lib/librdkafka-2.4.0/tests/0116-kafkaconsumer_close.cpp diff --git a/lib/librdkafka-2.3.0/tests/0117-mock_errors.c b/lib/librdkafka-2.4.0/tests/0117-mock_errors.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0117-mock_errors.c rename to lib/librdkafka-2.4.0/tests/0117-mock_errors.c index b91a3b61e8c..bd359bcef57 100644 --- a/lib/librdkafka-2.3.0/tests/0117-mock_errors.c +++ b/lib/librdkafka-2.4.0/tests/0117-mock_errors.c @@ -305,10 +305,7 @@ static void do_test_joingroup_coordinator_load_in_progress() { int main_0117_mock_errors(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); do_test_producer_storage_error(rd_false); do_test_producer_storage_error(rd_true); diff --git a/lib/librdkafka-2.3.0/tests/0118-commit_rebalance.c b/lib/librdkafka-2.4.0/tests/0118-commit_rebalance.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0118-commit_rebalance.c rename to lib/librdkafka-2.4.0/tests/0118-commit_rebalance.c diff --git a/lib/librdkafka-2.3.0/tests/0119-consumer_auth.cpp b/lib/librdkafka-2.4.0/tests/0119-consumer_auth.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0119-consumer_auth.cpp rename to lib/librdkafka-2.4.0/tests/0119-consumer_auth.cpp diff --git a/lib/librdkafka-2.3.0/tests/0120-asymmetric_subscription.c b/lib/librdkafka-2.4.0/tests/0120-asymmetric_subscription.c similarity index 97% rename from lib/librdkafka-2.3.0/tests/0120-asymmetric_subscription.c rename to lib/librdkafka-2.4.0/tests/0120-asymmetric_subscription.c index 11ee5f705ea..aedbca20a13 100644 --- a/lib/librdkafka-2.3.0/tests/0120-asymmetric_subscription.c +++ b/lib/librdkafka-2.4.0/tests/0120-asymmetric_subscription.c @@ -158,10 +158,7 @@ int main_0120_asymmetric_subscription(int argc, char **argv) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); mcluster = test_mock_cluster_new(3, &bootstraps); diff --git a/lib/librdkafka-2.3.0/tests/0121-clusterid.c b/lib/librdkafka-2.4.0/tests/0121-clusterid.c similarity index 96% rename from 
lib/librdkafka-2.3.0/tests/0121-clusterid.c rename to lib/librdkafka-2.4.0/tests/0121-clusterid.c index 0a463a88d04..f1b833592e6 100644 --- a/lib/librdkafka-2.3.0/tests/0121-clusterid.c +++ b/lib/librdkafka-2.4.0/tests/0121-clusterid.c @@ -65,10 +65,7 @@ int main_0121_clusterid(int argc, char **argv) { rd_atomic32_t log_cnt; int cnt = 0; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); /* Create two clusters */ cluster_a = test_mock_cluster_new(1, &bootstraps_a); diff --git a/lib/librdkafka-2.3.0/tests/0122-buffer_cleaning_after_rebalance.c b/lib/librdkafka-2.4.0/tests/0122-buffer_cleaning_after_rebalance.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0122-buffer_cleaning_after_rebalance.c rename to lib/librdkafka-2.4.0/tests/0122-buffer_cleaning_after_rebalance.c diff --git a/lib/librdkafka-2.3.0/tests/0123-connections_max_idle.c b/lib/librdkafka-2.4.0/tests/0123-connections_max_idle.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0123-connections_max_idle.c rename to lib/librdkafka-2.4.0/tests/0123-connections_max_idle.c diff --git a/lib/librdkafka-2.3.0/tests/0124-openssl_invalid_engine.c b/lib/librdkafka-2.4.0/tests/0124-openssl_invalid_engine.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0124-openssl_invalid_engine.c rename to lib/librdkafka-2.4.0/tests/0124-openssl_invalid_engine.c diff --git a/lib/librdkafka-2.3.0/tests/0125-immediate_flush.c b/lib/librdkafka-2.4.0/tests/0125-immediate_flush.c similarity index 97% rename from lib/librdkafka-2.3.0/tests/0125-immediate_flush.c rename to lib/librdkafka-2.4.0/tests/0125-immediate_flush.c index c7cbcca174b..35c98c4fd51 100644 --- a/lib/librdkafka-2.3.0/tests/0125-immediate_flush.c +++ b/lib/librdkafka-2.4.0/tests/0125-immediate_flush.c @@ -136,10 +136,7 @@ int main_0125_immediate_flush(int argc, char **argv) { int main_0125_immediate_flush_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); do_test_first_flush_immediate(); diff --git a/lib/librdkafka-2.3.0/tests/0126-oauthbearer_oidc.c b/lib/librdkafka-2.4.0/tests/0126-oauthbearer_oidc.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0126-oauthbearer_oidc.c rename to lib/librdkafka-2.4.0/tests/0126-oauthbearer_oidc.c diff --git a/lib/librdkafka-2.3.0/tests/0127-fetch_queue_backoff.cpp b/lib/librdkafka-2.4.0/tests/0127-fetch_queue_backoff.cpp similarity index 94% rename from lib/librdkafka-2.3.0/tests/0127-fetch_queue_backoff.cpp rename to lib/librdkafka-2.4.0/tests/0127-fetch_queue_backoff.cpp index 41c2db8c3ba..131ff57e35e 100644 --- a/lib/librdkafka-2.3.0/tests/0127-fetch_queue_backoff.cpp +++ b/lib/librdkafka-2.4.0/tests/0127-fetch_queue_backoff.cpp @@ -86,22 +86,22 @@ static void do_test_queue_backoff(const std::string &topic, int backoff_ms) { int received = 0; int in_profile_cnt = 0; - int dmax = - (int)((double)backoff_ms * (test_timeout_multiplier > 1 ? 1.5 : 1.2)); - if (backoff_ms < 15) - dmax = 15; + int dmax = backoff_ms + test_timeout_multiplier * 30; int64_t ts_consume = test_clock(); while (received < 5) { /* Wait more than dmax to count out of profile messages. * Different for first message, that is skipped. */ - int consume_timeout = - received == 0 ? 500 * test_timeout_multiplier : dmax * 2; + int consume_timeout = received == 0 ? 
1500 * test_timeout_multiplier : dmax; RdKafka::Message *msg = c->consume(consume_timeout); + if (msg->err() == RdKafka::ERR__TIMED_OUT) { + delete msg; + continue; + } rd_ts_t now = test_clock(); - int latency = (test_clock() - ts_consume) / 1000; + int latency = (now - ts_consume) / 1000; ts_consume = now; bool in_profile = latency <= dmax; diff --git a/lib/librdkafka-2.3.0/tests/0128-sasl_callback_queue.cpp b/lib/librdkafka-2.4.0/tests/0128-sasl_callback_queue.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0128-sasl_callback_queue.cpp rename to lib/librdkafka-2.4.0/tests/0128-sasl_callback_queue.cpp diff --git a/lib/librdkafka-2.3.0/tests/0129-fetch_aborted_msgs.c b/lib/librdkafka-2.4.0/tests/0129-fetch_aborted_msgs.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0129-fetch_aborted_msgs.c rename to lib/librdkafka-2.4.0/tests/0129-fetch_aborted_msgs.c diff --git a/lib/librdkafka-2.3.0/tests/0130-store_offsets.c b/lib/librdkafka-2.4.0/tests/0130-store_offsets.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0130-store_offsets.c rename to lib/librdkafka-2.4.0/tests/0130-store_offsets.c diff --git a/lib/librdkafka-2.3.0/tests/0131-connect_timeout.c b/lib/librdkafka-2.4.0/tests/0131-connect_timeout.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0131-connect_timeout.c rename to lib/librdkafka-2.4.0/tests/0131-connect_timeout.c diff --git a/lib/librdkafka-2.3.0/tests/0132-strategy_ordering.c b/lib/librdkafka-2.4.0/tests/0132-strategy_ordering.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0132-strategy_ordering.c rename to lib/librdkafka-2.4.0/tests/0132-strategy_ordering.c diff --git a/lib/librdkafka-2.3.0/tests/0133-ssl_keys.c b/lib/librdkafka-2.4.0/tests/0133-ssl_keys.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0133-ssl_keys.c rename to lib/librdkafka-2.4.0/tests/0133-ssl_keys.c diff --git a/lib/librdkafka-2.3.0/tests/0134-ssl_provider.c b/lib/librdkafka-2.4.0/tests/0134-ssl_provider.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0134-ssl_provider.c rename to lib/librdkafka-2.4.0/tests/0134-ssl_provider.c diff --git a/lib/librdkafka-2.3.0/tests/0135-sasl_credentials.cpp b/lib/librdkafka-2.4.0/tests/0135-sasl_credentials.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0135-sasl_credentials.cpp rename to lib/librdkafka-2.4.0/tests/0135-sasl_credentials.cpp diff --git a/lib/librdkafka-2.3.0/tests/0136-resolve_cb.c b/lib/librdkafka-2.4.0/tests/0136-resolve_cb.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0136-resolve_cb.c rename to lib/librdkafka-2.4.0/tests/0136-resolve_cb.c diff --git a/lib/librdkafka-2.3.0/tests/0137-barrier_batch_consume.c b/lib/librdkafka-2.4.0/tests/0137-barrier_batch_consume.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0137-barrier_batch_consume.c rename to lib/librdkafka-2.4.0/tests/0137-barrier_batch_consume.c diff --git a/lib/librdkafka-2.3.0/tests/0138-admin_mock.c b/lib/librdkafka-2.4.0/tests/0138-admin_mock.c similarity index 98% rename from lib/librdkafka-2.3.0/tests/0138-admin_mock.c rename to lib/librdkafka-2.4.0/tests/0138-admin_mock.c index 32c67c09d7d..77487cc7959 100644 --- a/lib/librdkafka-2.3.0/tests/0138-admin_mock.c +++ b/lib/librdkafka-2.4.0/tests/0138-admin_mock.c @@ -270,10 +270,7 @@ static void do_test_ListOffsets_leader_change(void) { int main_0138_admin_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + 
TEST_SKIP_MOCK_CLUSTER(0); do_test_AlterConsumerGroupOffsets_errors(-1); do_test_AlterConsumerGroupOffsets_errors(1000); diff --git a/lib/librdkafka-2.3.0/tests/0139-offset_validation_mock.c b/lib/librdkafka-2.4.0/tests/0139-offset_validation_mock.c similarity index 99% rename from lib/librdkafka-2.3.0/tests/0139-offset_validation_mock.c rename to lib/librdkafka-2.4.0/tests/0139-offset_validation_mock.c index 967563fd703..f6f9271eecb 100644 --- a/lib/librdkafka-2.3.0/tests/0139-offset_validation_mock.c +++ b/lib/librdkafka-2.4.0/tests/0139-offset_validation_mock.c @@ -427,10 +427,7 @@ static void do_test_store_offset_without_leader_epoch(void) { int main_0139_offset_validation_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); do_test_no_duplicates_during_offset_validation(); diff --git a/lib/librdkafka-2.3.0/tests/0140-commit_metadata.cpp b/lib/librdkafka-2.4.0/tests/0140-commit_metadata.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/0140-commit_metadata.cpp rename to lib/librdkafka-2.4.0/tests/0140-commit_metadata.cpp diff --git a/lib/librdkafka-2.3.0/tests/0142-reauthentication.c b/lib/librdkafka-2.4.0/tests/0142-reauthentication.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/0142-reauthentication.c rename to lib/librdkafka-2.4.0/tests/0142-reauthentication.c diff --git a/lib/librdkafka-2.3.0/tests/0143-exponential_backoff_mock.c b/lib/librdkafka-2.4.0/tests/0143-exponential_backoff_mock.c similarity index 97% rename from lib/librdkafka-2.3.0/tests/0143-exponential_backoff_mock.c rename to lib/librdkafka-2.4.0/tests/0143-exponential_backoff_mock.c index 80ae817d5ce..55a7d8fa08d 100644 --- a/lib/librdkafka-2.3.0/tests/0143-exponential_backoff_mock.c +++ b/lib/librdkafka-2.4.0/tests/0143-exponential_backoff_mock.c @@ -33,13 +33,6 @@ const int32_t retry_ms = 100; const int32_t retry_max_ms = 1000; -static void free_mock_requests(rd_kafka_mock_request_t **requests, - size_t request_cnt) { - size_t i; - for (i = 0; i < request_cnt; i++) - rd_kafka_mock_request_destroy(requests[i]); - rd_free(requests); -} /** * @brief find_coordinator test * We fail the request with RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, @@ -112,7 +105,7 @@ static void test_find_coordinator(rd_kafka_mock_cluster_t *mcluster, rd_kafka_mock_request_timestamp(requests[i]); } rd_kafka_destroy(consumer); - free_mock_requests(requests, request_cnt); + rd_kafka_mock_request_destroy_array(requests, request_cnt); rd_kafka_mock_clear_requests(mcluster); SUB_TEST_PASS(); } @@ -166,7 +159,7 @@ static void helper_exponential_backoff(rd_kafka_mock_cluster_t *mcluster, previous_request_ts = rd_kafka_mock_request_timestamp(requests[i]); } - free_mock_requests(requests, request_cnt); + rd_kafka_mock_request_destroy_array(requests, request_cnt); } /** * @brief offset_commit test @@ -297,7 +290,7 @@ static void helper_find_coordinator_trigger(rd_kafka_mock_cluster_t *mcluster, } } } - free_mock_requests(requests, request_cnt); + rd_kafka_mock_request_destroy_array(requests, request_cnt); if (num_request != 1) TEST_FAIL("No request was made."); } @@ -451,7 +444,7 @@ static void test_produce_fast_leader_query(rd_kafka_mock_cluster_t *mcluster, } rd_kafka_topic_destroy(rkt); rd_kafka_destroy(producer); - free_mock_requests(requests, request_cnt); + rd_kafka_mock_request_destroy_array(requests, request_cnt); rd_kafka_mock_clear_requests(mcluster); SUB_TEST_PASS(); } @@ -511,7 +504,7 @@ static 
void test_fetch_fast_leader_query(rd_kafka_mock_cluster_t *mcluster, previous_request_was_Fetch = rd_false; } rd_kafka_destroy(consumer); - free_mock_requests(requests, request_cnt); + rd_kafka_mock_request_destroy_array(requests, request_cnt); rd_kafka_mock_clear_requests(mcluster); TEST_ASSERT( Metadata_after_Fetch, @@ -530,10 +523,9 @@ int main_0143_exponential_backoff_mock(int argc, char **argv) { rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; const char *bootstraps; - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL.\n"); - return 0; - } + + TEST_SKIP_MOCK_CLUSTER(0); + mcluster = test_mock_cluster_new(1, &bootstraps); rd_kafka_mock_start_request_tracking(mcluster); rd_kafka_mock_topic_create(mcluster, topic, 1, 1); diff --git a/lib/librdkafka-2.3.0/tests/0144-idempotence_mock.c b/lib/librdkafka-2.4.0/tests/0144-idempotence_mock.c similarity index 99% rename from lib/librdkafka-2.3.0/tests/0144-idempotence_mock.c rename to lib/librdkafka-2.4.0/tests/0144-idempotence_mock.c index 62b392cde2e..25ba50eaec0 100644 --- a/lib/librdkafka-2.3.0/tests/0144-idempotence_mock.c +++ b/lib/librdkafka-2.4.0/tests/0144-idempotence_mock.c @@ -359,10 +359,7 @@ static void do_test_idempo_success_after_possibly_persisted(void) { } int main_0144_idempotence_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); int i; for (i = 1; i <= 5; i++) diff --git a/lib/librdkafka-2.4.0/tests/0145-pause_resume_mock.c b/lib/librdkafka-2.4.0/tests/0145-pause_resume_mock.c new file mode 100644 index 00000000000..34de903316a --- /dev/null +++ b/lib/librdkafka-2.4.0/tests/0145-pause_resume_mock.c @@ -0,0 +1,119 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2024, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" +#include "rdkafka.h" +#include "../src/rdkafka_proto.h" + +#include + +/** + * Verify that no duplicate messages are consumed after an unnecessary + * resume, ensuring the fetch version isn't bumped, which would lead to + * using a stale next fetch start. + * + * @param partition_assignment_strategy Assignment strategy to test.
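+ * + * The test seeds a mock topic, consumes all messages, then resumes a + * partition that was never paused and asserts that nothing is redelivered.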
+ */ +static void test_no_duplicate_messages_unnecessary_resume( + const char *partition_assignment_strategy) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_t *rk; + test_msgver_t mv; + rd_kafka_topic_partition_list_t *tlist; + char *topic = + rd_strdup(test_mk_topic_name("0050_unnecessary_resume_1", 1)); + uint64_t testid = test_id_generate(); + int msgcnt = 100; + + SUB_TEST("%s", partition_assignment_strategy); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + TEST_SAY("Seed the topic with messages\n"); + test_produce_msgs_easy_v(topic, testid, RD_KAFKA_PARTITION_UA, 0, + msgcnt, 1000, "bootstrap.servers", bootstraps, + NULL); + + test_conf_init(&conf, &tconf, 60); + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", + partition_assignment_strategy); + + TEST_SAY("Subscribe to topic\n"); + tlist = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(tlist, topic, RD_KAFKA_PARTITION_UA); + + rk = test_create_consumer("mygroup", NULL, conf, tconf); + TEST_CALL_ERR__(rd_kafka_subscribe(rk, tlist)); + + TEST_SAY("Consume and verify messages\n"); + test_msgver_init(&mv, testid); + test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv); + + TEST_SAY("Unnecessary resume\n"); + tlist->elems[0].partition = 0; /* Resume the only partition */ + TEST_CALL_ERR__(rd_kafka_resume_partitions(rk, tlist)); + + TEST_SAY("Ensure no duplicate messages\n"); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(3000)); + + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, + 0, msgcnt); + + test_msgver_clear(&mv); + + rd_kafka_topic_partition_list_destroy(tlist); + rd_kafka_consumer_close(rk); + rd_kafka_destroy(rk); + + test_mock_cluster_destroy(mcluster); + + rd_free(topic); + + SUB_TEST_PASS(); +} + +int main_0145_pause_resume_mock(int argc, char **argv) { + if (test_needs_auth()) { + TEST_SAY("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + test_no_duplicate_messages_unnecessary_resume("range"); + + test_no_duplicate_messages_unnecessary_resume("roundrobin"); + + test_no_duplicate_messages_unnecessary_resume("cooperative-sticky"); + + return 0; +} diff --git a/lib/librdkafka-2.4.0/tests/0146-metadata_mock.c b/lib/librdkafka-2.4.0/tests/0146-metadata_mock.c new file mode 100644 index 00000000000..95e03de8b3f --- /dev/null +++ b/lib/librdkafka-2.4.0/tests/0146-metadata_mock.c @@ -0,0 +1,272 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2024, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + +static rd_bool_t is_metadata_request(rd_kafka_mock_request_t *request, + void *opaque) { + return rd_kafka_mock_request_api_key(request) == RD_KAFKAP_Metadata; +} + +static rd_bool_t is_fetch_request(rd_kafka_mock_request_t *request, + void *opaque) { + return rd_kafka_mock_request_api_key(request) == RD_KAFKAP_Fetch; +} + +/** + * @brief Metadata should persist in cache after + * a full metadata refresh. + * + * @param assignor Assignor to use + */ +static void do_test_metadata_persists_in_cache(const char *assignor) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + const rd_kafka_metadata_t *md; + rd_kafka_topic_partition_list_t *subscription; + + SUB_TEST_QUICK("%s", assignor); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", assignor); + test_conf_set(conf, "group.id", topic); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + subscription = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(subscription, topic, 0); + + rkt = test_create_consumer_topic(rk, topic); + + /* Metadata for topic is available */ + TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, rkt, &md, 1000)); + rd_kafka_metadata_destroy(md); + md = NULL; + + /* Subscribe to same topic */ + TEST_CALL_ERR__(rd_kafka_subscribe(rk, subscription)); + + /* Request full metadata */ + TEST_CALL_ERR__(rd_kafka_metadata(rk, 1, NULL, &md, 1000)); + rd_kafka_metadata_destroy(md); + md = NULL; + + /* Subscribing shouldn't give UNKNOWN_TOPIC_OR_PART err. + * Verify no error was returned. */ + test_consumer_poll_no_msgs("no error", rk, 0, 100); + + rd_kafka_topic_partition_list_destroy(subscription); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief No loop of metadata requests should be started + * when a metadata request is made without leader epoch change.
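+ * A produce error that involves no leader change should result in a + * single additional Metadata request rather than a refresh loop.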
+ * See issue #4577 + */ +static void do_test_fast_metadata_refresh_stops(void) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + int metadata_requests; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* This error triggers a metadata refresh but no leader change + * happened */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR); + + rd_kafka_mock_start_request_tracking(mcluster); + test_produce_msgs2(rk, topic, 0, 0, 0, 1, NULL, 5); + + /* First call is for getting initial metadata, + * second one happens after the error, + * it should stop refreshing metadata after that. */ + metadata_requests = test_mock_wait_matching_requests( + mcluster, 2, 500, is_metadata_request, NULL); + TEST_ASSERT(metadata_requests == 2, + "Expected 2 metadata request, got %d", metadata_requests); + rd_kafka_mock_stop_request_tracking(mcluster); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief A stale leader received while validating shouldn't + * migrate back the partition to that stale broker. + */ +static void do_test_stale_metadata_doesnt_migrate_partition(void) { + int i, fetch_requests; + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 3); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "group.id", topic); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "fetch.error.backoff.ms", "10"); + test_conf_set(conf, "fetch.wait.max.ms", "10"); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + test_consumer_subscribe(rk, topic); + + /* Produce and consume to leader 1 */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 1, 0, "bootstrap.servers", + bootstraps, NULL); + test_consumer_poll_exact("read first", rk, 0, 0, 0, 1, rd_true, NULL); + + /* Change leader to 2, Fetch fails, refreshes metadata. 
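+ * The stale Fetch to broker 1 then fails, prompting the refresh that + * moves the partition to broker 2.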
*/ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2); + + for (i = 0; i < 5; i++) { + /* Validation fails, metadata refreshed again */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 2, RD_KAFKAP_OffsetForLeaderEpoch, 1, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, 1000); + } + + /* Wait until the partition migrates to broker 2 */ + rd_usleep(100 * 1000, 0); + + /* Return stale metadata */ + for (i = 0; i < 10; i++) { + rd_kafka_mock_partition_push_leader_response( + mcluster, topic, 0, 1 /*leader id*/, 0 /*leader epoch*/); + } + + /* The partition must not migrate back to broker 1 */ + rd_usleep(2000 * 1000, 0); + rd_kafka_mock_start_request_tracking(mcluster); + fetch_requests = test_mock_wait_matching_requests( + mcluster, 0, 500, is_fetch_request, NULL); + TEST_ASSERT(fetch_requests == 0, + "No fetch request should be received by broker 1, got %d", + fetch_requests); + rd_kafka_mock_stop_request_tracking(mcluster); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief A metadata call for an existing topic, just after subscription, + * must not cause an UNKNOWN_TOPIC_OR_PART error. + * See issue #4589. + */ +static void do_test_metadata_call_before_join(void) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + const struct rd_kafka_metadata *metadata; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 3); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "group.id", topic); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + test_consumer_subscribe(rk, topic); + + TEST_CALL_ERR__(rd_kafka_metadata(rk, 1, 0, &metadata, 5000)); + rd_kafka_metadata_destroy(metadata); + + test_consumer_poll_no_msgs("no errors", rk, 0, 1000); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +int main_0146_metadata_mock(int argc, char **argv) { + TEST_SKIP_MOCK_CLUSTER(0); + + /* No need to test the "roundrobin" assignor case, + * as this is just for checking the two code paths: + * EAGER or COOPERATIVE one, and "range" is EAGER too.
*/ + do_test_metadata_persists_in_cache("range"); + do_test_metadata_persists_in_cache("cooperative-sticky"); + + do_test_metadata_call_before_join(); + + do_test_fast_metadata_refresh_stops(); + + do_test_stale_metadata_doesnt_migrate_partition(); + + return 0; +} diff --git a/lib/librdkafka-2.3.0/tests/1000-unktopic.c b/lib/librdkafka-2.4.0/tests/1000-unktopic.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/1000-unktopic.c rename to lib/librdkafka-2.4.0/tests/1000-unktopic.c diff --git a/lib/librdkafka-2.3.0/tests/8000-idle.cpp b/lib/librdkafka-2.4.0/tests/8000-idle.cpp similarity index 100% rename from lib/librdkafka-2.3.0/tests/8000-idle.cpp rename to lib/librdkafka-2.4.0/tests/8000-idle.cpp diff --git a/lib/librdkafka-2.3.0/tests/8001-fetch_from_follower_mock_manual.c b/lib/librdkafka-2.4.0/tests/8001-fetch_from_follower_mock_manual.c similarity index 96% rename from lib/librdkafka-2.3.0/tests/8001-fetch_from_follower_mock_manual.c rename to lib/librdkafka-2.4.0/tests/8001-fetch_from_follower_mock_manual.c index d542be5f919..c6bc8024e4d 100644 --- a/lib/librdkafka-2.3.0/tests/8001-fetch_from_follower_mock_manual.c +++ b/lib/librdkafka-2.4.0/tests/8001-fetch_from_follower_mock_manual.c @@ -105,10 +105,7 @@ static void do_test_fetch_from_follower_offset_retry(void) { int main_8001_fetch_from_follower_mock_manual(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + TEST_SKIP_MOCK_CLUSTER(0); do_test_fetch_from_follower_offset_retry(); diff --git a/lib/librdkafka-2.3.0/tests/CMakeLists.txt b/lib/librdkafka-2.4.0/tests/CMakeLists.txt similarity index 98% rename from lib/librdkafka-2.3.0/tests/CMakeLists.txt rename to lib/librdkafka-2.4.0/tests/CMakeLists.txt index 8a4c285e95d..62ce0deb026 100644 --- a/lib/librdkafka-2.3.0/tests/CMakeLists.txt +++ b/lib/librdkafka-2.4.0/tests/CMakeLists.txt @@ -135,6 +135,8 @@ set( 0142-reauthentication.c 0143-exponential_backoff_mock.c 0144-idempotence_mock.c + 0145-pause_resume_mock.c + 0146-metadata_mock.c 8000-idle.cpp 8001-fetch_from_follower_mock_manual.c test.c diff --git a/lib/librdkafka-2.3.0/tests/LibrdkafkaTestApp.py b/lib/librdkafka-2.4.0/tests/LibrdkafkaTestApp.py similarity index 95% rename from lib/librdkafka-2.3.0/tests/LibrdkafkaTestApp.py rename to lib/librdkafka-2.4.0/tests/LibrdkafkaTestApp.py index 696fa88cc45..40fdd12341f 100644 --- a/lib/librdkafka-2.3.0/tests/LibrdkafkaTestApp.py +++ b/lib/librdkafka-2.4.0/tests/LibrdkafkaTestApp.py @@ -191,7 +191,7 @@ def __init__(self, cluster, version, conf=None, if tests is not None: self.env_add('TESTS', ','.join(tests)) - def start_cmd(self): + def finalize_env(self): self.env_add( 'KAFKA_PATH', self.cluster.get_all( @@ -199,19 +199,23 @@ def start_cmd(self): '', KafkaBrokerApp)[0], False) - self.env_add( - 'ZK_ADDRESS', - self.cluster.get_all( - 'address', - '', - ZookeeperApp)[0], - False) + + zookeeper = self.cluster.get_all( + 'address', + '', + ZookeeperApp) + if len(zookeeper): + self.env_add( + 'ZK_ADDRESS', + zookeeper[0], + False) self.env_add('BROKERS', self.cluster.bootstrap_servers(), False) # Provide a HTTPS REST endpoint for the HTTP client tests. 
self.env_add( 'RD_UT_HTTP_URL', - 'https://jsonplaceholder.typicode.com/users') + 'https://jsonplaceholder.typicode.com/users', + False) # Per broker env vars for b in [x for x in self.cluster.apps if isinstance( @@ -219,14 +223,20 @@ def start_cmd(self): self.env_add('BROKER_ADDRESS_%d' % b.appid, ','.join([x for x in b.conf['listeners'].split(',') - if x.startswith(self.security_protocol)])) + if x.startswith(self.security_protocol)]), + False) # Add each broker pid as an env so they can be killed # indivdidually. - self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid)) + self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid), False) # JMX port, if available jmx_port = b.conf.get('jmx_port', None) if jmx_port is not None: - self.env_add('BROKER_JMX_PORT_%d' % b.appid, str(jmx_port)) + self.env_add( + 'BROKER_JMX_PORT_%d' % + b.appid, str(jmx_port), False) + + def start_cmd(self): + self.finalize_env() extra_args = list() if not self.local_tests: diff --git a/lib/librdkafka-2.3.0/tests/Makefile b/lib/librdkafka-2.4.0/tests/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/tests/Makefile rename to lib/librdkafka-2.4.0/tests/Makefile diff --git a/lib/librdkafka-2.3.0/tests/README.md b/lib/librdkafka-2.4.0/tests/README.md similarity index 100% rename from lib/librdkafka-2.3.0/tests/README.md rename to lib/librdkafka-2.4.0/tests/README.md diff --git a/lib/librdkafka-2.3.0/tests/autotest.sh b/lib/librdkafka-2.4.0/tests/autotest.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/autotest.sh rename to lib/librdkafka-2.4.0/tests/autotest.sh diff --git a/lib/librdkafka-2.3.0/tests/backtrace.gdb b/lib/librdkafka-2.4.0/tests/backtrace.gdb similarity index 100% rename from lib/librdkafka-2.3.0/tests/backtrace.gdb rename to lib/librdkafka-2.4.0/tests/backtrace.gdb diff --git a/lib/librdkafka-2.3.0/tests/broker_version_tests.py b/lib/librdkafka-2.4.0/tests/broker_version_tests.py similarity index 91% rename from lib/librdkafka-2.3.0/tests/broker_version_tests.py rename to lib/librdkafka-2.4.0/tests/broker_version_tests.py index 717da28d543..c451e02471b 100755 --- a/lib/librdkafka-2.3.0/tests/broker_version_tests.py +++ b/lib/librdkafka-2.4.0/tests/broker_version_tests.py @@ -23,7 +23,8 @@ def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, - interact=False, debug=False, scenario="default"): + interact=False, debug=False, scenario="default", kraft=False, + inherit_env=False): """ @brief Create, deploy and start a Kafka cluster using Kafka \\p version Then run librdkafka's regression tests. @@ -31,7 +32,8 @@ def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, cluster = LibrdkafkaTestCluster(version, conf, num_brokers=int(conf.get('broker_cnt', 3)), - debug=debug, scenario=scenario) + debug=debug, scenario=scenario, + kraft=kraft) # librdkafka's regression tests, as an App. 
_rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf @@ -46,11 +48,20 @@ def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, cluster.start(timeout=30) if conf.get('test_mode', '') == 'bash': - cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % ( # noqa: E501 - cluster.name, version) + rdkafka.finalize_env() + + if inherit_env: + env = dict(os.environ, **rdkafka.env) + else: + env = dict(rdkafka.env) + trivup = f'[TRIVUP:{cluster.name}@{version}] ' + PS1 = ((trivup + env['PS1']) if 'PS1' in env + else trivup + '\\u@\\h:\\w$ ')\ + .translate(str.maketrans({'\'': '\\\''})) + cmd = f'bash --rcfile <(cat ~/.bashrc; echo \'PS1="{PS1}"\')' subprocess.call( cmd, - env=rdkafka.env, + env=env, shell=True, executable='/bin/bash') report = None @@ -175,6 +186,12 @@ def handle_report(report, version, suite): type=str, default=None, help='SASL mechanism (PLAIN, GSSAPI)') + parser.add_argument( + '--kraft', + dest='kraft', + action='store_true', + default=False, + help='Run in KRaft mode') args = parser.parse_args() @@ -239,7 +256,8 @@ def handle_report(report, version, suite): report = test_it(version, tests=tests, conf=_conf, rdkconf=_rdkconf, interact=args.interact, debug=args.debug, - scenario=args.scenario) + scenario=args.scenario, + kraft=args.kraft) if not report: continue diff --git a/lib/librdkafka-2.3.0/tests/buildbox.sh b/lib/librdkafka-2.4.0/tests/buildbox.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/buildbox.sh rename to lib/librdkafka-2.4.0/tests/buildbox.sh diff --git a/lib/librdkafka-2.3.0/tests/cleanup-checker-tests.sh b/lib/librdkafka-2.4.0/tests/cleanup-checker-tests.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/cleanup-checker-tests.sh rename to lib/librdkafka-2.4.0/tests/cleanup-checker-tests.sh diff --git a/lib/librdkafka-2.3.0/tests/cluster_testing.py b/lib/librdkafka-2.4.0/tests/cluster_testing.py similarity index 89% rename from lib/librdkafka-2.3.0/tests/cluster_testing.py rename to lib/librdkafka-2.4.0/tests/cluster_testing.py index cfdc08db635..d3189f1cdb9 100755 --- a/lib/librdkafka-2.3.0/tests/cluster_testing.py +++ b/lib/librdkafka-2.4.0/tests/cluster_testing.py @@ -37,7 +37,7 @@ def read_scenario_conf(scenario): class LibrdkafkaTestCluster(Cluster): def __init__(self, version, conf={}, num_brokers=3, debug=False, - scenario="default"): + scenario="default", kraft=False): """ @brief Create, deploy and start a Kafka cluster using Kafka \\p version @@ -61,8 +61,9 @@ def __init__(self, version, conf={}, num_brokers=3, debug=False, self.brokers = list() - # One ZK (from Kafka repo) - ZookeeperApp(self) + if not kraft: + # One ZK (from Kafka repo) + ZookeeperApp(self) # Start Kerberos KDC if GSSAPI (Kerberos) is configured if 'GSSAPI' in defconf.get('sasl_mechanisms', []): @@ -84,15 +85,22 @@ def __init__(self, version, conf={}, num_brokers=3, debug=False, self.conf = defconf for n in range(0, num_brokers): + defconf_curr = dict(defconf) + if 'conf' in defconf_curr: + defconf_curr['conf'] = list(defconf_curr['conf']) # Configure rack & replica selector if broker supports # fetch-from-follower if version_as_list(version) >= [2, 4, 0]: - defconf.update( + curr_conf = defconf_curr.get('conf', list()) + defconf_curr.update( { 'conf': [ 'broker.rack=RACK${appid}', - 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501 - self.brokers.append(KafkaBrokerApp(self, defconf)) + 
'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector' # noqa: E501 + ] + curr_conf + }) # noqa: E501 + print('conf broker', str(n), ': ', defconf_curr) + self.brokers.append(KafkaBrokerApp(self, defconf_curr)) def bootstrap_servers(self): """ @return Kafka bootstrap servers based on security.protocol """ diff --git a/lib/librdkafka-2.3.0/tests/delete-test-topics.sh b/lib/librdkafka-2.4.0/tests/delete-test-topics.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/delete-test-topics.sh rename to lib/librdkafka-2.4.0/tests/delete-test-topics.sh diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/.gitignore b/lib/librdkafka-2.4.0/tests/fixtures/ssl/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/.gitignore rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/.gitignore diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/Makefile b/lib/librdkafka-2.4.0/tests/fixtures/ssl/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/Makefile rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/Makefile diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/README.md b/lib/librdkafka-2.4.0/tests/fixtures/ssl/README.md similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/README.md rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/README.md diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/client.keystore.p12 b/lib/librdkafka-2.4.0/tests/fixtures/ssl/client.keystore.p12 similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/client.keystore.p12 rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/client.keystore.p12 diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/client2.certificate.pem b/lib/librdkafka-2.4.0/tests/fixtures/ssl/client2.certificate.pem similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/client2.certificate.pem rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/client2.certificate.pem diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/client2.key b/lib/librdkafka-2.4.0/tests/fixtures/ssl/client2.key similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/client2.key rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/client2.key diff --git a/lib/librdkafka-2.3.0/tests/fixtures/ssl/create_keys.sh b/lib/librdkafka-2.4.0/tests/fixtures/ssl/create_keys.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/fixtures/ssl/create_keys.sh rename to lib/librdkafka-2.4.0/tests/fixtures/ssl/create_keys.sh diff --git a/lib/librdkafka-2.3.0/tests/fuzzers/.gitignore b/lib/librdkafka-2.4.0/tests/fuzzers/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/tests/fuzzers/.gitignore rename to lib/librdkafka-2.4.0/tests/fuzzers/.gitignore diff --git a/lib/librdkafka-2.3.0/tests/fuzzers/Makefile b/lib/librdkafka-2.4.0/tests/fuzzers/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/tests/fuzzers/Makefile rename to lib/librdkafka-2.4.0/tests/fuzzers/Makefile diff --git a/lib/librdkafka-2.3.0/tests/fuzzers/README.md b/lib/librdkafka-2.4.0/tests/fuzzers/README.md similarity index 100% rename from lib/librdkafka-2.3.0/tests/fuzzers/README.md rename to lib/librdkafka-2.4.0/tests/fuzzers/README.md diff --git a/lib/librdkafka-2.3.0/tests/fuzzers/fuzz_regex.c b/lib/librdkafka-2.4.0/tests/fuzzers/fuzz_regex.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/fuzzers/fuzz_regex.c rename to lib/librdkafka-2.4.0/tests/fuzzers/fuzz_regex.c diff --git a/lib/librdkafka-2.3.0/tests/fuzzers/helpers.h 
b/lib/librdkafka-2.4.0/tests/fuzzers/helpers.h similarity index 100% rename from lib/librdkafka-2.3.0/tests/fuzzers/helpers.h rename to lib/librdkafka-2.4.0/tests/fuzzers/helpers.h diff --git a/lib/librdkafka-2.3.0/tests/gen-ssl-certs.sh b/lib/librdkafka-2.4.0/tests/gen-ssl-certs.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/gen-ssl-certs.sh rename to lib/librdkafka-2.4.0/tests/gen-ssl-certs.sh diff --git a/lib/librdkafka-2.4.0/tests/interactive_broker_version.py b/lib/librdkafka-2.4.0/tests/interactive_broker_version.py new file mode 100755 index 00000000000..acddc872fd8 --- /dev/null +++ b/lib/librdkafka-2.4.0/tests/interactive_broker_version.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +# +# +# Run librdkafka regression tests on different supported broker versions. +# +# Requires: +# trivup python module +# gradle in your PATH + +from cluster_testing import read_scenario_conf +from broker_version_tests import test_it + +import os +import sys +import argparse +import json + + +def version_as_number(version): + if version == 'trunk': + return sys.maxsize + tokens = version.split('.') + return float('%s.%s' % (tokens[0], tokens[1])) + + +def test_version(version, cmd=None, deploy=True, conf={}, debug=False, + exec_cnt=1, + root_path='tmp', broker_cnt=3, scenario='default', + kraft=False): + """ + @brief Create, deploy and start a Kafka cluster using Kafka \\p version + Then run librdkafka's regression tests. Use inherited environment. + """ + conf['test_mode'] = 'bash' + test_it(version, deploy, conf, {}, None, True, debug, + scenario, kraft, True) + return True + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser( + description='Start a Kafka cluster and provide an interactive shell') + + parser.add_argument('versions', type=str, default=None, nargs='+', + help='Kafka version(s) to deploy') + parser.add_argument('--no-deploy', action='store_false', dest='deploy', + default=True, + help='Dont deploy applications, ' + 'assume already deployed.') + parser.add_argument('--conf', type=str, dest='conf', default=None, + help=''' + JSON config object (not file). + This does not translate to broker configs directly. 
+ If broker config properties are to be specified, + they should be specified with + --conf \'{"conf": ["key=value", "key=value"]}\'''') + parser.add_argument('--scenario', type=str, dest='scenario', + default='default', + help='Test scenario (see scenarios/ directory)') + parser.add_argument('-c', type=str, dest='cmd', default=None, + help='Command to execute instead of shell') + parser.add_argument('-n', type=int, dest='exec_cnt', default=1, + help='Number of times to execute -c ..') + parser.add_argument('--debug', action='store_true', dest='debug', + default=False, + help='Enable trivup debugging') + parser.add_argument( + '--root', + type=str, + default=os.environ.get( + 'TRIVUP_ROOT', + 'tmp'), + help='Root working directory') + parser.add_argument( + '--port', + default=None, + help='Base TCP port to start allocating from') + parser.add_argument( + '--kafka-src', + dest='kafka_path', + type=str, + default=None, + help='Path to Kafka git repo checkout (used for version=trunk)') + parser.add_argument( + '--brokers', + dest='broker_cnt', + type=int, + default=3, + help='Number of Kafka brokers') + parser.add_argument('--ssl', dest='ssl', action='store_true', + default=False, + help='Enable SSL endpoints') + parser.add_argument( + '--sasl', + dest='sasl', + type=str, + default=None, + help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)') + parser.add_argument( + '--oauthbearer-method', + dest='sasl_oauthbearer_method', + type=str, + default=None, + help='OAUTHBEARER/OIDC method (DEFAULT, OIDC), \ + must config SASL mechanism to OAUTHBEARER') + parser.add_argument( + '--max-reauth-ms', + dest='reauth_ms', + type=int, + default='10000', + help=''' + Sets the value of connections.max.reauth.ms on the brokers. + Set 0 to disable.''') + parser.add_argument( + '--kraft', + dest='kraft', + action='store_true', + default=False, + help='Run in KRaft mode') + + args = parser.parse_args() + if args.conf is not None: + args.conf = json.loads(args.conf) + else: + args.conf = {} + + args.conf.update(read_scenario_conf(args.scenario)) + + if args.port is not None: + args.conf['port_base'] = int(args.port) + if args.kafka_path is not None: + args.conf['kafka_path'] = args.kafka_path + if args.ssl: + args.conf['security.protocol'] = 'SSL' + if args.sasl: + if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM') + != -1) and 'sasl_users' not in args.conf: + args.conf['sasl_users'] = 'testuser=testpass' + args.conf['sasl_mechanisms'] = args.sasl + retcode = 0 + if args.sasl_oauthbearer_method: + if args.sasl_oauthbearer_method == "OIDC" and \ + args.conf['sasl_mechanisms'] != 'OAUTHBEARER': + print('If config `--oauthbearer-method=OIDC`, ' + '`--sasl` must be set to `OAUTHBEARER`') + retcode = 3 + sys.exit(retcode) + args.conf['sasl_oauthbearer_method'] = \ + args.sasl_oauthbearer_method + + if 'conf' not in args.conf: + args.conf['conf'] = [] + + args.conf['conf'].append( + "connections.max.reauth.ms={}".format( + args.reauth_ms)) + args.conf['conf'].append("log.retention.bytes=1000000000") + + for version in args.versions: + r = test_version(version, cmd=args.cmd, deploy=args.deploy, + conf=args.conf, debug=args.debug, + exec_cnt=args.exec_cnt, + root_path=args.root, broker_cnt=args.broker_cnt, + scenario=args.scenario, + kraft=args.kraft) + if not r: + retcode = 2 + + sys.exit(retcode) diff --git a/lib/librdkafka-2.3.0/tests/interceptor_test/.gitignore b/lib/librdkafka-2.4.0/tests/interceptor_test/.gitignore similarity index 100% rename from 
lib/librdkafka-2.3.0/tests/interceptor_test/.gitignore rename to lib/librdkafka-2.4.0/tests/interceptor_test/.gitignore diff --git a/lib/librdkafka-2.3.0/tests/interceptor_test/CMakeLists.txt b/lib/librdkafka-2.4.0/tests/interceptor_test/CMakeLists.txt similarity index 100% rename from lib/librdkafka-2.3.0/tests/interceptor_test/CMakeLists.txt rename to lib/librdkafka-2.4.0/tests/interceptor_test/CMakeLists.txt diff --git a/lib/librdkafka-2.3.0/tests/interceptor_test/Makefile b/lib/librdkafka-2.4.0/tests/interceptor_test/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/tests/interceptor_test/Makefile rename to lib/librdkafka-2.4.0/tests/interceptor_test/Makefile diff --git a/lib/librdkafka-2.3.0/tests/interceptor_test/interceptor_test.c b/lib/librdkafka-2.4.0/tests/interceptor_test/interceptor_test.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/interceptor_test/interceptor_test.c rename to lib/librdkafka-2.4.0/tests/interceptor_test/interceptor_test.c diff --git a/lib/librdkafka-2.3.0/tests/interceptor_test/interceptor_test.h b/lib/librdkafka-2.4.0/tests/interceptor_test/interceptor_test.h similarity index 100% rename from lib/librdkafka-2.3.0/tests/interceptor_test/interceptor_test.h rename to lib/librdkafka-2.4.0/tests/interceptor_test/interceptor_test.h diff --git a/lib/librdkafka-2.3.0/tests/java/.gitignore b/lib/librdkafka-2.4.0/tests/java/.gitignore similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/.gitignore rename to lib/librdkafka-2.4.0/tests/java/.gitignore diff --git a/lib/librdkafka-2.3.0/tests/java/IncrementalRebalanceCli.java b/lib/librdkafka-2.4.0/tests/java/IncrementalRebalanceCli.java similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/IncrementalRebalanceCli.java rename to lib/librdkafka-2.4.0/tests/java/IncrementalRebalanceCli.java diff --git a/lib/librdkafka-2.3.0/tests/java/Makefile b/lib/librdkafka-2.4.0/tests/java/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/Makefile rename to lib/librdkafka-2.4.0/tests/java/Makefile diff --git a/lib/librdkafka-2.3.0/tests/java/Murmur2Cli.java b/lib/librdkafka-2.4.0/tests/java/Murmur2Cli.java similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/Murmur2Cli.java rename to lib/librdkafka-2.4.0/tests/java/Murmur2Cli.java diff --git a/lib/librdkafka-2.3.0/tests/java/README.md b/lib/librdkafka-2.4.0/tests/java/README.md similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/README.md rename to lib/librdkafka-2.4.0/tests/java/README.md diff --git a/lib/librdkafka-2.3.0/tests/java/TransactionProducerCli.java b/lib/librdkafka-2.4.0/tests/java/TransactionProducerCli.java similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/TransactionProducerCli.java rename to lib/librdkafka-2.4.0/tests/java/TransactionProducerCli.java diff --git a/lib/librdkafka-2.3.0/tests/java/run-class.sh b/lib/librdkafka-2.4.0/tests/java/run-class.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/java/run-class.sh rename to lib/librdkafka-2.4.0/tests/java/run-class.sh diff --git a/lib/librdkafka-2.3.0/tests/librdkafka.suppressions b/lib/librdkafka-2.4.0/tests/librdkafka.suppressions similarity index 100% rename from lib/librdkafka-2.3.0/tests/librdkafka.suppressions rename to lib/librdkafka-2.4.0/tests/librdkafka.suppressions diff --git a/lib/librdkafka-2.3.0/tests/lz4_manual_test.sh b/lib/librdkafka-2.4.0/tests/lz4_manual_test.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/lz4_manual_test.sh rename to 
lib/librdkafka-2.4.0/tests/lz4_manual_test.sh diff --git a/lib/librdkafka-2.3.0/tests/multi-broker-version-test.sh b/lib/librdkafka-2.4.0/tests/multi-broker-version-test.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/multi-broker-version-test.sh rename to lib/librdkafka-2.4.0/tests/multi-broker-version-test.sh diff --git a/lib/librdkafka-2.3.0/tests/parse-refcnt.sh b/lib/librdkafka-2.4.0/tests/parse-refcnt.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/parse-refcnt.sh rename to lib/librdkafka-2.4.0/tests/parse-refcnt.sh diff --git a/lib/librdkafka-2.3.0/tests/performance_plot.py b/lib/librdkafka-2.4.0/tests/performance_plot.py similarity index 100% rename from lib/librdkafka-2.3.0/tests/performance_plot.py rename to lib/librdkafka-2.4.0/tests/performance_plot.py diff --git a/lib/librdkafka-2.3.0/tests/plugin_test/Makefile b/lib/librdkafka-2.4.0/tests/plugin_test/Makefile similarity index 100% rename from lib/librdkafka-2.3.0/tests/plugin_test/Makefile rename to lib/librdkafka-2.4.0/tests/plugin_test/Makefile diff --git a/lib/librdkafka-2.3.0/tests/plugin_test/plugin_test.c b/lib/librdkafka-2.4.0/tests/plugin_test/plugin_test.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/plugin_test/plugin_test.c rename to lib/librdkafka-2.4.0/tests/plugin_test/plugin_test.c diff --git a/lib/librdkafka-2.4.0/tests/requirements.txt b/lib/librdkafka-2.4.0/tests/requirements.txt new file mode 100644 index 00000000000..bd7777d3a10 --- /dev/null +++ b/lib/librdkafka-2.4.0/tests/requirements.txt @@ -0,0 +1,2 @@ +trivup/trivup-0.12.4.tar.gz +jsoncomment diff --git a/lib/librdkafka-2.3.0/tests/run-consumer-tests.sh b/lib/librdkafka-2.4.0/tests/run-consumer-tests.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/run-consumer-tests.sh rename to lib/librdkafka-2.4.0/tests/run-consumer-tests.sh diff --git a/lib/librdkafka-2.3.0/tests/run-producer-tests.sh b/lib/librdkafka-2.4.0/tests/run-producer-tests.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/run-producer-tests.sh rename to lib/librdkafka-2.4.0/tests/run-producer-tests.sh diff --git a/lib/librdkafka-2.3.0/tests/run-test.sh b/lib/librdkafka-2.4.0/tests/run-test.sh similarity index 100% rename from lib/librdkafka-2.3.0/tests/run-test.sh rename to lib/librdkafka-2.4.0/tests/run-test.sh diff --git a/lib/librdkafka-2.3.0/tests/rusage.c b/lib/librdkafka-2.4.0/tests/rusage.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/rusage.c rename to lib/librdkafka-2.4.0/tests/rusage.c diff --git a/lib/librdkafka-2.3.0/tests/sasl_test.py b/lib/librdkafka-2.4.0/tests/sasl_test.py similarity index 89% rename from lib/librdkafka-2.3.0/tests/sasl_test.py rename to lib/librdkafka-2.4.0/tests/sasl_test.py index 9cb7d194a13..1260c72b1fe 100755 --- a/lib/librdkafka-2.3.0/tests/sasl_test.py +++ b/lib/librdkafka-2.4.0/tests/sasl_test.py @@ -9,11 +9,10 @@ # gradle in your PATH from cluster_testing import ( - LibrdkafkaTestCluster, print_report_summary, print_test_report_summary, read_scenario_conf) -from LibrdkafkaTestApp import LibrdkafkaTestApp +from broker_version_tests import test_it import os import sys @@ -22,52 +21,6 @@ import tempfile -def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False, - scenario="default"): - """ - @brief Create, deploy and start a Kafka cluster using Kafka \\p version - Then run librdkafka's regression tests. 
- """ - - cluster = LibrdkafkaTestCluster( - version, conf, debug=debug, scenario=scenario) - - # librdkafka's regression tests, as an App. - rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests, - scenario=scenario) - rdkafka.do_cleanup = False - rdkafka.local_tests = False - - if deploy: - cluster.deploy() - - cluster.start(timeout=30) - - print( - '# Connect to cluster with bootstrap.servers %s' % - cluster.bootstrap_servers()) - rdkafka.start() - print( - '# librdkafka regression tests started, logs in %s' % - rdkafka.root_path()) - try: - rdkafka.wait_stopped(timeout=60 * 30) - rdkafka.dbg( - 'wait stopped: %s, runtime %ds' % - (rdkafka.state, rdkafka.runtime())) - except KeyboardInterrupt: - print('# Aborted by user') - - report = rdkafka.report() - if report is not None: - report['root_path'] = rdkafka.root_path() - - cluster.stop(force=True) - - cluster.cleanup() - return report - - def handle_report(report, version, suite): """ Parse test report and return tuple (Passed(bool), Reason(str)) """ test_cnt = report.get('tests_run', 0) @@ -137,6 +90,13 @@ def handle_report(report, version, suite): help='Only run matching suite(s) (substring match)') parser.add_argument('versions', type=str, default=None, nargs='*', help='Limit broker versions to these') + parser.add_argument( + '--kraft', + dest='kraft', + action='store_true', + default=False, + help='Run in KRaft mode') + args = parser.parse_args() conf = dict() @@ -280,7 +240,8 @@ def handle_report(report, version, suite): tests_to_run = tests report = test_it(version, tests=tests_to_run, conf=_conf, rdkconf=_rdkconf, - debug=args.debug, scenario=args.scenario) + debug=args.debug, scenario=args.scenario, + kraft=args.kraft) # Handle test report report['version'] = version diff --git a/lib/librdkafka-2.3.0/tests/scenarios/README.md b/lib/librdkafka-2.4.0/tests/scenarios/README.md similarity index 100% rename from lib/librdkafka-2.3.0/tests/scenarios/README.md rename to lib/librdkafka-2.4.0/tests/scenarios/README.md diff --git a/lib/librdkafka-2.3.0/tests/scenarios/ak23.json b/lib/librdkafka-2.4.0/tests/scenarios/ak23.json similarity index 100% rename from lib/librdkafka-2.3.0/tests/scenarios/ak23.json rename to lib/librdkafka-2.4.0/tests/scenarios/ak23.json diff --git a/lib/librdkafka-2.3.0/tests/scenarios/default.json b/lib/librdkafka-2.4.0/tests/scenarios/default.json similarity index 100% rename from lib/librdkafka-2.3.0/tests/scenarios/default.json rename to lib/librdkafka-2.4.0/tests/scenarios/default.json diff --git a/lib/librdkafka-2.3.0/tests/scenarios/noautocreate.json b/lib/librdkafka-2.4.0/tests/scenarios/noautocreate.json similarity index 100% rename from lib/librdkafka-2.3.0/tests/scenarios/noautocreate.json rename to lib/librdkafka-2.4.0/tests/scenarios/noautocreate.json diff --git a/lib/librdkafka-2.3.0/tests/sockem.c b/lib/librdkafka-2.4.0/tests/sockem.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/sockem.c rename to lib/librdkafka-2.4.0/tests/sockem.c diff --git a/lib/librdkafka-2.3.0/tests/sockem.h b/lib/librdkafka-2.4.0/tests/sockem.h similarity index 100% rename from lib/librdkafka-2.3.0/tests/sockem.h rename to lib/librdkafka-2.4.0/tests/sockem.h diff --git a/lib/librdkafka-2.3.0/tests/sockem_ctrl.c b/lib/librdkafka-2.4.0/tests/sockem_ctrl.c similarity index 100% rename from lib/librdkafka-2.3.0/tests/sockem_ctrl.c rename to lib/librdkafka-2.4.0/tests/sockem_ctrl.c diff --git a/lib/librdkafka-2.3.0/tests/sockem_ctrl.h b/lib/librdkafka-2.4.0/tests/sockem_ctrl.h similarity index 100% 
rename from lib/librdkafka-2.3.0/tests/sockem_ctrl.h
rename to lib/librdkafka-2.4.0/tests/sockem_ctrl.h
diff --git a/lib/librdkafka-2.3.0/tests/test.c b/lib/librdkafka-2.4.0/tests/test.c
similarity index 98%
rename from lib/librdkafka-2.3.0/tests/test.c
rename to lib/librdkafka-2.4.0/tests/test.c
index 2ef4a36c3a9..83487f5e5ce 100644
--- a/lib/librdkafka-2.3.0/tests/test.c
+++ b/lib/librdkafka-2.4.0/tests/test.c
@@ -48,17 +48,18 @@
 int test_level = 2;
 int test_seed  = 0;
 
-char test_mode[64]                     = "bare";
-char test_scenario[64]                 = "default";
-static volatile sig_atomic_t test_exit = 0;
-static char test_topic_prefix[128]     = "rdkafkatest";
-static int test_topic_random           = 0;
-int tests_running_cnt                  = 0;
-int test_concurrent_max                = 5;
-int test_assert_on_fail                = 0;
-double test_timeout_multiplier         = 1.0;
-static char *test_sql_cmd              = NULL;
-int test_session_timeout_ms            = 6000;
+char test_mode[64]                                  = "bare";
+char test_scenario[64]                              = "default";
+static volatile sig_atomic_t test_exit              = 0;
+static char test_topic_prefix[128]                  = "rdkafkatest";
+static int test_topic_random                        = 0;
+int tests_running_cnt                               = 0;
+int test_concurrent_max                             = 5;
+int test_assert_on_fail                             = 0;
+double test_timeout_multiplier                      = 1.0;
+static char *test_sql_cmd                           = NULL;
+int test_session_timeout_ms                         = 6000;
+static const char *test_consumer_group_protocol_str = NULL;
 int test_broker_version;
 static const char *test_broker_version_str = "2.4.0.0";
 int test_flags = 0;
@@ -187,6 +188,7 @@ _TEST_DECL(0073_headers);
 _TEST_DECL(0074_producev);
 _TEST_DECL(0075_retry);
 _TEST_DECL(0076_produce_retry);
+_TEST_DECL(0076_produce_retry_mock);
 _TEST_DECL(0077_compaction);
 _TEST_DECL(0078_c_from_cpp);
 _TEST_DECL(0079_fork);
@@ -257,6 +259,8 @@ _TEST_DECL(0140_commit_metadata);
 _TEST_DECL(0142_reauthentication);
 _TEST_DECL(0143_exponential_backoff_mock);
 _TEST_DECL(0144_idempotence_mock);
+_TEST_DECL(0145_pause_resume_mock);
+_TEST_DECL(0146_metadata_mock);
 
 /* Manual tests */
 _TEST_DECL(8000_idle);
@@ -418,6 +422,7 @@ struct test tests[] = {
         _TEST(0075_retry, TEST_F_SOCKEM),
 #endif
         _TEST(0076_produce_retry, TEST_F_SOCKEM),
+        _TEST(0076_produce_retry_mock, TEST_F_LOCAL),
         _TEST(0077_compaction,
               0,
               /* The test itself requires message headers */
@@ -511,6 +516,8 @@ struct test tests[] = {
         _TEST(0142_reauthentication, 0, TEST_BRKVER(2, 2, 0, 0)),
         _TEST(0143_exponential_backoff_mock, TEST_F_LOCAL),
         _TEST(0144_idempotence_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)),
+        _TEST(0145_pause_resume_mock, TEST_F_LOCAL),
+        _TEST(0146_metadata_mock, TEST_F_LOCAL),
 
 
         /* Manual tests */
@@ -763,6 +770,9 @@ static void test_init(void) {
                         exit(1);
                 }
         }
+        test_consumer_group_protocol_str =
+            test_getenv("TEST_CONSUMER_GROUP_PROTOCOL", NULL);
+
 
 #ifdef _WIN32
         test_init_win32();
@@ -1803,17 +1813,14 @@ int main(int argc, char **argv) {
 
         TEST_SAY("Git version: %s\n", test_git_version);
 
-        if (!strcmp(test_broker_version_str, "trunk"))
-                test_broker_version_str = "9.9.9.9"; /* for now */
-
         d = 0;
         if (sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d) <
             3) {
-                printf(
-                    "%% Expected broker version to be in format "
-                    "N.N.N (N=int), not %s\n",
-                    test_broker_version_str);
-                exit(1);
+                TEST_SAY(
+                    "Non-numeric broker version, setting version"
+                    " to 9.9.9.9\n");
+                test_broker_version_str = "9.9.9.9";
+                sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d);
         }
         test_broker_version = TEST_BRKVER(a, b, c, d);
         TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", test_broker_version_str,
@@ -2035,7 +2042,10 @@ rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf) {
test_conf_set(conf, "client.id", test_curr->name); } - + if (mode == RD_KAFKA_CONSUMER && test_consumer_group_protocol_str) { + test_conf_set(conf, "group.protocol", + test_consumer_group_protocol_str); + } /* Creat kafka instance */ rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr)); @@ -7127,7 +7137,63 @@ rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt, return mcluster; } +/** + * @brief Get current number of matching requests, + * received by mock cluster \p mcluster, matching + * function \p match , called with opaque \p opaque . + */ +static size_t test_mock_get_matching_request_cnt( + rd_kafka_mock_cluster_t *mcluster, + rd_bool_t (*match)(rd_kafka_mock_request_t *request, void *opaque), + void *opaque) { + size_t i; + size_t request_cnt; + rd_kafka_mock_request_t **requests; + size_t matching_request_cnt = 0; + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + for (i = 0; i < request_cnt; i++) { + if (match(requests[i], opaque)) + matching_request_cnt++; + } + + rd_kafka_mock_request_destroy_array(requests, request_cnt); + return matching_request_cnt; +} + +/** + * @brief Wait that at least \p expected_cnt matching requests + * have been received by the mock cluster, + * using match function \p match , + * plus \p confidence_interval_ms has passed + * + * @param expected_cnt Number of expected matching request + * @param confidence_interval_ms Time to wait after \p expected_cnt matching + * requests have been seen + * @param match Match function that takes a request and \p opaque + * @param opaque Opaque value needed by function \p match + * + * @return Number of matching requests received. + */ +size_t test_mock_wait_matching_requests( + rd_kafka_mock_cluster_t *mcluster, + size_t expected_cnt, + int confidence_interval_ms, + rd_bool_t (*match)(rd_kafka_mock_request_t *request, void *opaque), + void *opaque) { + size_t matching_request_cnt = 0; + + while (matching_request_cnt < expected_cnt) { + matching_request_cnt = + test_mock_get_matching_request_cnt(mcluster, match, opaque); + if (matching_request_cnt < expected_cnt) + rd_usleep(100 * 1000, 0); + } + + rd_usleep(confidence_interval_ms * 1000, 0); + return test_mock_get_matching_request_cnt(mcluster, match, opaque); +} /** * @name Sub-tests @@ -7234,3 +7300,17 @@ void test_sub_skip(const char *fmt, ...) 
 {
         test_sub_reset();
 }
+
+const char *test_consumer_group_protocol() {
+        return test_consumer_group_protocol_str;
+}
+
+int test_consumer_group_protocol_generic() {
+        return !test_consumer_group_protocol_str ||
+               !strcmp(test_consumer_group_protocol_str, "classic");
+}
+
+int test_consumer_group_protocol_consumer() {
+        return test_consumer_group_protocol_str &&
+               !strcmp(test_consumer_group_protocol_str, "consumer");
+}
diff --git a/lib/librdkafka-2.3.0/tests/test.conf.example b/lib/librdkafka-2.4.0/tests/test.conf.example
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/test.conf.example
rename to lib/librdkafka-2.4.0/tests/test.conf.example
diff --git a/lib/librdkafka-2.3.0/tests/test.h b/lib/librdkafka-2.4.0/tests/test.h
similarity index 96%
rename from lib/librdkafka-2.3.0/tests/test.h
rename to lib/librdkafka-2.4.0/tests/test.h
index 671472b43a0..c7f07ccbded 100644
--- a/lib/librdkafka-2.3.0/tests/test.h
+++ b/lib/librdkafka-2.4.0/tests/test.h
@@ -240,6 +240,20 @@ static RD_INLINE RD_UNUSED void rtrim(char *str) {
                 TEST_UNLOCK();                                                 \
         } while (0)
 
+#define TEST_SKIP_MOCK_CLUSTER(RET)                                            \
+        if (test_needs_auth()) {                                               \
+                TEST_SKIP("Mock cluster does not support SSL/SASL\n");         \
+                return RET;                                                    \
+        }                                                                      \
+        if (test_consumer_group_protocol() &&                                  \
+            strcmp(test_consumer_group_protocol(), "classic")) {               \
+                TEST_SKIP(                                                     \
+                    "Mock cluster cannot be used "                             \
+                    "with group.protocol=%s\n",                                \
+                    test_consumer_group_protocol());                           \
+                return RET;                                                    \
+        }
+
 void test_conf_init(rd_kafka_conf_t **conf,
                     rd_kafka_topic_conf_t **topic_conf,
@@ -845,14 +859,24 @@ rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms);
 void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
 
 rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt,
                                                const char **bootstraps);
-
-
+size_t test_mock_wait_matching_requests(
+    rd_kafka_mock_cluster_t *mcluster,
+    size_t num,
+    int confidence_interval_ms,
+    rd_bool_t (*match)(rd_kafka_mock_request_t *request, void *opaque),
+    void *opaque);
 
 int test_error_is_not_fatal_cb(rd_kafka_t *rk,
                                rd_kafka_resp_err_t err,
                                const char *reason);
 
+const char *test_consumer_group_protocol();
+
+int test_consumer_group_protocol_generic();
+
+int test_consumer_group_protocol_consumer();
+
 /**
  * @brief Calls rdkafka function (with arguments)
  * and checks its return value (must be rd_kafka_resp_err_t) for
diff --git a/lib/librdkafka-2.3.0/tests/testcpp.cpp b/lib/librdkafka-2.4.0/tests/testcpp.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/testcpp.cpp
rename to lib/librdkafka-2.4.0/tests/testcpp.cpp
diff --git a/lib/librdkafka-2.3.0/tests/testcpp.h b/lib/librdkafka-2.4.0/tests/testcpp.h
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/testcpp.h
rename to lib/librdkafka-2.4.0/tests/testcpp.h
diff --git a/lib/librdkafka-2.3.0/tests/testshared.h b/lib/librdkafka-2.4.0/tests/testshared.h
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/testshared.h
rename to lib/librdkafka-2.4.0/tests/testshared.h
diff --git a/lib/librdkafka-2.3.0/tests/tools/README.md b/lib/librdkafka-2.4.0/tests/tools/README.md
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/tools/README.md
rename to lib/librdkafka-2.4.0/tests/tools/README.md
diff --git a/lib/librdkafka-2.3.0/tests/tools/stats/README.md b/lib/librdkafka-2.4.0/tests/tools/stats/README.md
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/tools/stats/README.md
rename to lib/librdkafka-2.4.0/tests/tools/stats/README.md
diff --git a/lib/librdkafka-2.3.0/tests/tools/stats/filter.jq b/lib/librdkafka-2.4.0/tests/tools/stats/filter.jq
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/tools/stats/filter.jq
rename to lib/librdkafka-2.4.0/tests/tools/stats/filter.jq
diff --git a/lib/librdkafka-2.3.0/tests/tools/stats/graph.py b/lib/librdkafka-2.4.0/tests/tools/stats/graph.py
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/tools/stats/graph.py
rename to lib/librdkafka-2.4.0/tests/tools/stats/graph.py
diff --git a/lib/librdkafka-2.3.0/tests/tools/stats/requirements.txt b/lib/librdkafka-2.4.0/tests/tools/stats/requirements.txt
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/tools/stats/requirements.txt
rename to lib/librdkafka-2.4.0/tests/tools/stats/requirements.txt
diff --git a/lib/librdkafka-2.3.0/tests/tools/stats/to_csv.py b/lib/librdkafka-2.4.0/tests/tools/stats/to_csv.py
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/tools/stats/to_csv.py
rename to lib/librdkafka-2.4.0/tests/tools/stats/to_csv.py
diff --git a/lib/librdkafka-2.3.0/tests/until-fail.sh b/lib/librdkafka-2.4.0/tests/until-fail.sh
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/until-fail.sh
rename to lib/librdkafka-2.4.0/tests/until-fail.sh
diff --git a/lib/librdkafka-2.3.0/tests/xxxx-assign_partition.c b/lib/librdkafka-2.4.0/tests/xxxx-assign_partition.c
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/xxxx-assign_partition.c
rename to lib/librdkafka-2.4.0/tests/xxxx-assign_partition.c
diff --git a/lib/librdkafka-2.3.0/tests/xxxx-metadata.cpp b/lib/librdkafka-2.4.0/tests/xxxx-metadata.cpp
similarity index 100%
rename from lib/librdkafka-2.3.0/tests/xxxx-metadata.cpp
rename to lib/librdkafka-2.4.0/tests/xxxx-metadata.cpp
diff --git a/lib/librdkafka-2.3.0/vcpkg.json b/lib/librdkafka-2.4.0/vcpkg.json
similarity index 95%
rename from lib/librdkafka-2.3.0/vcpkg.json
rename to lib/librdkafka-2.4.0/vcpkg.json
index f44a3be6829..5598809687e 100644
--- a/lib/librdkafka-2.3.0/vcpkg.json
+++ b/lib/librdkafka-2.4.0/vcpkg.json
@@ -1,6 +1,6 @@
 {
     "name": "librdkafka",
-    "version": "2.3.0",
+    "version": "2.4.0",
     "dependencies": [
         {
             "name": "zstd",
diff --git a/lib/librdkafka-2.3.0/win32/.gitignore b/lib/librdkafka-2.4.0/win32/.gitignore
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/.gitignore
rename to lib/librdkafka-2.4.0/win32/.gitignore
diff --git a/lib/librdkafka-2.3.0/win32/README.md b/lib/librdkafka-2.4.0/win32/README.md
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/README.md
rename to lib/librdkafka-2.4.0/win32/README.md
diff --git a/lib/librdkafka-2.3.0/win32/build-package.bat b/lib/librdkafka-2.4.0/win32/build-package.bat
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/build-package.bat
rename to lib/librdkafka-2.4.0/win32/build-package.bat
diff --git a/lib/librdkafka-2.3.0/win32/build.bat b/lib/librdkafka-2.4.0/win32/build.bat
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/build.bat
rename to lib/librdkafka-2.4.0/win32/build.bat
diff --git a/lib/librdkafka-2.3.0/win32/common.vcxproj b/lib/librdkafka-2.4.0/win32/common.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/common.vcxproj
rename to lib/librdkafka-2.4.0/win32/common.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/install-openssl.ps1 b/lib/librdkafka-2.4.0/win32/install-openssl.ps1
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/install-openssl.ps1
rename to lib/librdkafka-2.4.0/win32/install-openssl.ps1
diff --git a/lib/librdkafka-2.3.0/win32/interceptor_test/interceptor_test.vcxproj b/lib/librdkafka-2.4.0/win32/interceptor_test/interceptor_test.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/interceptor_test/interceptor_test.vcxproj
rename to lib/librdkafka-2.4.0/win32/interceptor_test/interceptor_test.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/librdkafka.autopkg.template b/lib/librdkafka-2.4.0/win32/librdkafka.autopkg.template
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/librdkafka.autopkg.template
rename to lib/librdkafka-2.4.0/win32/librdkafka.autopkg.template
diff --git a/lib/librdkafka-2.3.0/win32/librdkafka.master.testing.targets b/lib/librdkafka-2.4.0/win32/librdkafka.master.testing.targets
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/librdkafka.master.testing.targets
rename to lib/librdkafka-2.4.0/win32/librdkafka.master.testing.targets
diff --git a/lib/librdkafka-2.3.0/win32/librdkafka.sln b/lib/librdkafka-2.4.0/win32/librdkafka.sln
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/librdkafka.sln
rename to lib/librdkafka-2.4.0/win32/librdkafka.sln
diff --git a/lib/librdkafka-2.3.0/win32/librdkafka.vcxproj b/lib/librdkafka-2.4.0/win32/librdkafka.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/librdkafka.vcxproj
rename to lib/librdkafka-2.4.0/win32/librdkafka.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/librdkafkacpp/librdkafkacpp.vcxproj b/lib/librdkafka-2.4.0/win32/librdkafkacpp/librdkafkacpp.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/librdkafkacpp/librdkafkacpp.vcxproj
rename to lib/librdkafka-2.4.0/win32/librdkafkacpp/librdkafkacpp.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/msbuild.ps1 b/lib/librdkafka-2.4.0/win32/msbuild.ps1
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/msbuild.ps1
rename to lib/librdkafka-2.4.0/win32/msbuild.ps1
diff --git a/lib/librdkafka-2.3.0/win32/openssl_engine_example/openssl_engine_example.vcxproj b/lib/librdkafka-2.4.0/win32/openssl_engine_example/openssl_engine_example.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/openssl_engine_example/openssl_engine_example.vcxproj
rename to lib/librdkafka-2.4.0/win32/openssl_engine_example/openssl_engine_example.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/package-zip.ps1 b/lib/librdkafka-2.4.0/win32/package-zip.ps1
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/package-zip.ps1
rename to lib/librdkafka-2.4.0/win32/package-zip.ps1
diff --git a/lib/librdkafka-2.3.0/win32/packages/repositories.config b/lib/librdkafka-2.4.0/win32/packages/repositories.config
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/packages/repositories.config
rename to lib/librdkafka-2.4.0/win32/packages/repositories.config
diff --git a/lib/librdkafka-2.3.0/win32/push-package.bat b/lib/librdkafka-2.4.0/win32/push-package.bat
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/push-package.bat
rename to lib/librdkafka-2.4.0/win32/push-package.bat
diff --git a/lib/librdkafka-2.3.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj b/lib/librdkafka-2.4.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj
rename to lib/librdkafka-2.4.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/rdkafka_example/rdkafka_example.vcxproj b/lib/librdkafka-2.4.0/win32/rdkafka_example/rdkafka_example.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/rdkafka_example/rdkafka_example.vcxproj
rename to lib/librdkafka-2.4.0/win32/rdkafka_example/rdkafka_example.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/rdkafka_performance/rdkafka_performance.vcxproj b/lib/librdkafka-2.4.0/win32/rdkafka_performance/rdkafka_performance.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/rdkafka_performance/rdkafka_performance.vcxproj
rename to lib/librdkafka-2.4.0/win32/rdkafka_performance/rdkafka_performance.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/setup-msys2.ps1 b/lib/librdkafka-2.4.0/win32/setup-msys2.ps1
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/setup-msys2.ps1
rename to lib/librdkafka-2.4.0/win32/setup-msys2.ps1
diff --git a/lib/librdkafka-2.3.0/win32/setup-vcpkg.ps1 b/lib/librdkafka-2.4.0/win32/setup-vcpkg.ps1
similarity index 59%
rename from lib/librdkafka-2.3.0/win32/setup-vcpkg.ps1
rename to lib/librdkafka-2.4.0/win32/setup-vcpkg.ps1
index c2bd78b84e2..79dee94cb80 100644
--- a/lib/librdkafka-2.3.0/win32/setup-vcpkg.ps1
+++ b/lib/librdkafka-2.4.0/win32/setup-vcpkg.ps1
@@ -5,8 +5,7 @@
 if (!(Test-Path -Path vcpkg/.git)) {
 }
 
 cd vcpkg
-# latest version is having an issue while doing vcpkg integrate install
-git checkout 328bd79eb8340b8958f567aaf5f8ffb81056cd36
+git checkout 2023.11.20
 cd ..
 .\vcpkg\bootstrap-vcpkg.bat
diff --git a/lib/librdkafka-2.3.0/win32/tests/.gitignore b/lib/librdkafka-2.4.0/win32/tests/.gitignore
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/tests/.gitignore
rename to lib/librdkafka-2.4.0/win32/tests/.gitignore
diff --git a/lib/librdkafka-2.3.0/win32/tests/test.conf.example b/lib/librdkafka-2.4.0/win32/tests/test.conf.example
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/tests/test.conf.example
rename to lib/librdkafka-2.4.0/win32/tests/test.conf.example
diff --git a/lib/librdkafka-2.3.0/win32/tests/tests.vcxproj b/lib/librdkafka-2.4.0/win32/tests/tests.vcxproj
similarity index 99%
rename from lib/librdkafka-2.3.0/win32/tests/tests.vcxproj
rename to lib/librdkafka-2.4.0/win32/tests/tests.vcxproj
index 6a48f527d88..a354f278f8e 100644
--- a/lib/librdkafka-2.3.0/win32/tests/tests.vcxproj
+++ b/lib/librdkafka-2.4.0/win32/tests/tests.vcxproj
@@ -225,6 +225,8 @@
     <ClCompile Include="..\..\tests\0142-reauthentication.c" />
     <ClCompile Include="..\..\tests\0143-exponential_backoff_mock.c" />
     <ClCompile Include="..\..\tests\0144-idempotence_mock.c" />
+    <ClCompile Include="..\..\tests\0145-pause_resume_mock.c" />
+    <ClCompile Include="..\..\tests\0146-metadata_mock.c" />
     <ClCompile Include="..\..\tests\8000-idle.cpp" />
    <ClCompile Include="..\..\tests\test.c" />
     <ClCompile Include="..\..\tests\testcpp.cpp" />
diff --git a/lib/librdkafka-2.3.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj b/lib/librdkafka-2.4.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj
rename to lib/librdkafka-2.4.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj
diff --git a/lib/librdkafka-2.3.0/win32/wingetopt.c b/lib/librdkafka-2.4.0/win32/wingetopt.c
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/wingetopt.c
rename to lib/librdkafka-2.4.0/win32/wingetopt.c
diff --git a/lib/librdkafka-2.3.0/win32/wingetopt.h b/lib/librdkafka-2.4.0/win32/wingetopt.h
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/wingetopt.h
rename to lib/librdkafka-2.4.0/win32/wingetopt.h
diff --git a/lib/librdkafka-2.3.0/win32/wintime.h b/lib/librdkafka-2.4.0/win32/wintime.h
similarity index 100%
rename from lib/librdkafka-2.3.0/win32/wintime.h
rename to lib/librdkafka-2.4.0/win32/wintime.h