From 9d1ad77a0896db920b092dea73799b8c29debc6a Mon Sep 17 00:00:00 2001 From: Fu Zhe Date: Thu, 31 Mar 2022 20:20:29 +0800 Subject: [PATCH] remove useless clickhouse applications (#4539) ref pingcap/tiflash#4538 --- dbms/src/Server/Benchmark.cpp | 514 ------ dbms/src/Server/CMakeLists.txt | 81 +- dbms/src/Server/Compressor.cpp | 141 -- dbms/src/Server/ExtractFromConfig.cpp | 100 -- dbms/src/Server/Format.cpp | 82 - dbms/src/Server/LocalServer.cpp | 515 ------ dbms/src/Server/LocalServer.h | 64 - dbms/src/Server/PerformanceTest.cpp | 1522 ----------------- dbms/src/Server/clickhouse-benchmark.cpp | 16 - dbms/src/Server/clickhouse-compressor.cpp | 16 - .../Server/clickhouse-extract-from-config.cpp | 16 - dbms/src/Server/clickhouse-format.cpp | 16 - dbms/src/Server/clickhouse-local.cpp | 16 - .../Server/clickhouse-performance-test.cpp | 16 - dbms/src/Server/config_tools.h.in | 5 - dbms/src/Server/main.cpp | 38 - 16 files changed, 1 insertion(+), 3157 deletions(-) delete mode 100644 dbms/src/Server/Benchmark.cpp delete mode 100644 dbms/src/Server/Compressor.cpp delete mode 100644 dbms/src/Server/ExtractFromConfig.cpp delete mode 100644 dbms/src/Server/Format.cpp delete mode 100644 dbms/src/Server/LocalServer.cpp delete mode 100644 dbms/src/Server/LocalServer.h delete mode 100644 dbms/src/Server/PerformanceTest.cpp delete mode 100644 dbms/src/Server/clickhouse-benchmark.cpp delete mode 100644 dbms/src/Server/clickhouse-compressor.cpp delete mode 100644 dbms/src/Server/clickhouse-extract-from-config.cpp delete mode 100644 dbms/src/Server/clickhouse-format.cpp delete mode 100644 dbms/src/Server/clickhouse-local.cpp delete mode 100644 dbms/src/Server/clickhouse-performance-test.cpp diff --git a/dbms/src/Server/Benchmark.cpp b/dbms/src/Server/Benchmark.cpp deleted file mode 100644 index de67ff42356..00000000000 --- a/dbms/src/Server/Benchmark.cpp +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "InterruptListener.h" - - -/** A tool for evaluating ClickHouse performance. - * The tool emulates a case with fixed amount of simultaneously executing queries. 
- */ - -namespace DB -{ -namespace ErrorCodes -{ -extern const int POCO_EXCEPTION; -extern const int STD_EXCEPTION; -extern const int UNKNOWN_EXCEPTION; -extern const int BAD_ARGUMENTS; -} // namespace ErrorCodes - -class Benchmark -{ -public: - Benchmark( - unsigned concurrency_, - double delay_, - const String & host_, - UInt16 port_, - const String & default_database_, - const String & user_, - const String & password_, - const String & stage, - bool randomize_, - size_t max_iterations_, - double max_time_, - const String & json_path_, - const ConnectionTimeouts & timeouts, - const Settings & settings_) - : concurrency(concurrency_) - , delay(delay_) - , queue(concurrency) - , connections(concurrency, host_, port_, default_database_, user_, password_, timeouts) - , randomize(randomize_) - , max_iterations(max_iterations_) - , max_time(max_time_) - , json_path(json_path_) - , settings(settings_) - , global_context(Context::createGlobal()) - , pool(concurrency) - { - std::cerr << std::fixed << std::setprecision(3); - - /// This is needed to receive blocks with columns of AggregateFunction data type - /// (example: when using stage = 'with_mergeable_state') - registerAggregateFunctions(); - - if (stage == "complete") - query_processing_stage = QueryProcessingStage::Complete; - else if (stage == "fetch_columns") - query_processing_stage = QueryProcessingStage::FetchColumns; - else if (stage == "with_mergeable_state") - query_processing_stage = QueryProcessingStage::WithMergeableState; - else - throw Exception("Unknown query processing stage: " + stage, ErrorCodes::BAD_ARGUMENTS); - - if (!json_path.empty() && Poco::File(json_path).exists()) /// Clear file with previous results - { - Poco::File(json_path).remove(); - } - - readQueries(); - run(); - } - -private: - using Query = std::string; - - unsigned concurrency; - double delay; - - using Queries = std::vector; - Queries queries; - - using Queue = ConcurrentBoundedQueue; - Queue queue; - - ConnectionPool connections; - bool randomize; - size_t max_iterations; - double max_time; - String json_path; - Settings settings; - Context global_context; - QueryProcessingStage::Enum query_processing_stage; - - /// Don't execute new queries after timelimit or SIGINT or exception - std::atomic shutdown{false}; - - struct Stats - { - Stopwatch watch; - std::atomic queries{0}; - size_t read_rows = 0; - size_t read_bytes = 0; - size_t result_rows = 0; - size_t result_bytes = 0; - - using Sampler = ReservoirSampler; - Sampler sampler{1 << 16}; - - void add(double seconds, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc) - { - ++queries; - read_rows += read_rows_inc; - read_bytes += read_bytes_inc; - result_rows += result_rows_inc; - result_bytes += result_bytes_inc; - sampler.insert(seconds); - } - - void clear() - { - watch.restart(); - queries = 0; - read_rows = 0; - read_bytes = 0; - result_rows = 0; - result_bytes = 0; - sampler.clear(); - } - }; - - Stats info_per_interval; - Stats info_total; - Stopwatch delay_watch; - - std::mutex mutex; - - ThreadPool pool; - - - void readQueries() - { - ReadBufferFromFileDescriptor in(STDIN_FILENO); - - while (!in.eof()) - { - std::string query; - readText(query, in); - assertChar('\n', in); - - if (!query.empty()) - queries.emplace_back(query); - } - - if (queries.empty()) - throw Exception("Empty list of queries."); - - std::cerr << "Loaded " << queries.size() << " queries.\n"; - } - - - void printNumberOfQueriesExecuted(size_t num) - { - std::cerr << "\nQueries 
executed: " << num; - if (queries.size() > 1) - std::cerr << " (" << (num * 100.0 / queries.size()) << "%)"; - std::cerr << ".\n"; - } - - /// Try push new query and check cancellation conditions - bool tryPushQueryInteractively(const String & query, InterruptListener & interrupt_listener) - { - bool inserted = false; - - while (!inserted) - { - inserted = queue.tryPush(query, 100); - - if (shutdown) - { - /// An exception occurred in a worker - return false; - } - - if (max_time > 0 && info_total.watch.elapsedSeconds() >= max_time) - { - std::cout << "Stopping launch of queries. Requested time limit is exhausted.\n"; - return false; - } - - if (interrupt_listener.check()) - { - std::cout << "Stopping launch of queries. SIGINT recieved.\n"; - return false; - } - - if (delay > 0 && delay_watch.elapsedSeconds() > delay) - { - printNumberOfQueriesExecuted(info_total.queries); - report(info_per_interval); - delay_watch.restart(); - } - }; - - return true; - } - - void run() - { - pcg64 generator(randomSeed()); - std::uniform_int_distribution distribution(0, queries.size() - 1); - - for (size_t i = 0; i < concurrency; ++i) - pool.schedule([this, conn = connections.get()] { thread(conn); }); - - InterruptListener interrupt_listener; - info_per_interval.watch.restart(); - delay_watch.restart(); - - /// Push queries into queue - for (size_t i = 0; !max_iterations || i < max_iterations; ++i) - { - size_t query_index = randomize ? distribution(generator) : i % queries.size(); - - if (!tryPushQueryInteractively(queries[query_index], interrupt_listener)) - break; - } - - shutdown = true; - pool.wait(); - info_total.watch.stop(); - - if (!json_path.empty()) - reportJSON(info_total, json_path); - - printNumberOfQueriesExecuted(info_total.queries); - report(info_total); - } - - - void thread(ConnectionPool::Entry connection) - { - Query query; - - try - { - /// In these threads we do not accept INT signal. - sigset_t sig_set; - if (sigemptyset(&sig_set) - || sigaddset(&sig_set, SIGINT) - || pthread_sigmask(SIG_BLOCK, &sig_set, nullptr)) - throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL); - - while (true) - { - bool extracted = false; - - while (!extracted) - { - extracted = queue.tryPop(query, 100); - - if (shutdown) - return; - } - - execute(connection, query); - } - } - catch (...) 
- { - shutdown = true; - std::cerr << "An error occurred while processing query:\n" - << query << "\n"; - throw; - } - } - - - void execute(ConnectionPool::Entry & connection, Query & query) - { - Stopwatch watch; - RemoteBlockInputStream stream(*connection, query, {}, global_context, &settings, nullptr, Tables(), query_processing_stage); - - Progress progress; - stream.setProgressCallback([&progress](const Progress & value) { progress.incrementPiecewiseAtomically(value); }); - - stream.readPrefix(); - while (Block block = stream.read()) - ; - stream.readSuffix(); - - const BlockStreamProfileInfo & info = stream.getProfileInfo(); - - double seconds = watch.elapsedSeconds(); - - std::lock_guard lock(mutex); - info_per_interval.add(seconds, progress.rows, progress.bytes, info.rows, info.bytes); - info_total.add(seconds, progress.rows, progress.bytes, info.rows, info.bytes); - } - - - void report(Stats & info) - { - std::lock_guard lock(mutex); - - /// Avoid zeros, nans or exceptions - if (0 == info.queries) - return; - - double seconds = info.watch.elapsedSeconds(); - - std::cerr - << "\n" - << "QPS: " << (info.queries / seconds) << ", " - << "RPS: " << (info.read_rows / seconds) << ", " - << "MiB/s: " << (info.read_bytes / seconds / 1048576) << ", " - << "result RPS: " << (info.result_rows / seconds) << ", " - << "result MiB/s: " << (info.result_bytes / seconds / 1048576) << "." - << "\n"; - - auto print_percentile = [&](double percent) { - std::cerr << percent << "%\t" << info.sampler.quantileInterpolated(percent / 100.0) << " sec." << std::endl; - }; - - for (int percent = 0; percent <= 90; percent += 10) - print_percentile(percent); - - print_percentile(95); - print_percentile(99); - print_percentile(99.9); - print_percentile(99.99); - - info.clear(); - } - - void reportJSON(Stats & info, const std::string & filename) - { - WriteBufferFromFile json_out(filename); - - std::lock_guard lock(mutex); - - auto print_key_value = [&](auto key, auto value, bool with_comma = true) { - json_out << double_quote << key << ": " << value << (with_comma ? ",\n" : "\n"); - }; - - auto print_percentile = [&json_out, &info](auto percent, bool with_comma = true) { - json_out << "\"" << percent << "\"" - << ": " << info.sampler.quantileInterpolated(percent / 100.0) << (with_comma ? 
",\n" : "\n"); - }; - - json_out << "{\n"; - - json_out << double_quote << "statistics" - << ": {\n"; - - double seconds = info.watch.elapsedSeconds(); - print_key_value("QPS", info.queries / seconds); - print_key_value("RPS", info.read_rows / seconds); - print_key_value("MiBPS", info.read_bytes / seconds); - print_key_value("RPS_result", info.result_rows / seconds); - print_key_value("MiBPS_result", info.result_bytes / seconds); - print_key_value("num_queries", info.queries.load(), false); - - json_out << "},\n"; - - json_out << double_quote << "query_time_percentiles" - << ": {\n"; - - for (int percent = 0; percent <= 90; percent += 10) - print_percentile(percent); - - print_percentile(95); - print_percentile(99); - print_percentile(99.9); - print_percentile(99.99, false); - - json_out << "}\n"; - - json_out << "}\n"; - } - -public: - ~Benchmark() - { - shutdown = true; - } -}; - -} // namespace DB - - -int mainEntryClickHouseBenchmark(int argc, char ** argv) -{ - using namespace DB; - bool print_stacktrace = true; - - try - { - using boost::program_options::value; - - boost::program_options::options_description desc("Allowed options"); - // clang-format off - desc.add_options() - ("help", "produce help message") - ("concurrency,c", value()->default_value(1), "number of parallel queries") - ("delay,d", value()->default_value(1), "delay between intermediate reports in seconds (set 0 to disable reports)") - ("stage", value()->default_value("complete"), "request query processing up to specified stage") - ("iterations,i", value()->default_value(0), "amount of queries to be executed") - ("timelimit,t", value()->default_value(0.), "stop launch of queries after specified time limit") - ("randomize,r", value()->default_value(false), "randomize order of execution") - ("json", value()->default_value(""), "write final report to specified file in JSON format") - ("host,h", value()->default_value("localhost"), "") - ("port", value()->default_value(9000), "") - ("user", value()->default_value("default"), "") - ("password", value()->default_value(""), "") - ("database", value()->default_value("default"), "") - ("stacktrace", "print stack traces of exceptions") - -#define DECLARE_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) (#NAME, boost::program_options::value(), DESCRIPTION) - APPLY_FOR_SETTINGS(DECLARE_SETTING) -#undef DECLARE_SETTING - ; - // clang-format on - - boost::program_options::variables_map options; - boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); - - if (options.count("help")) - { - std::cout << "Usage: " << argv[0] << " [options] < queries.txt\n"; - std::cout << desc << "\n"; - return 1; - } - - print_stacktrace = options.count("stacktrace"); - - /// Extract `settings` and `limits` from received `options` - Settings settings; - -#define EXTRACT_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) \ - if (options.count(#NAME)) \ - settings.set(#NAME, options[#NAME].as()); - APPLY_FOR_SETTINGS(EXTRACT_SETTING) -#undef EXTRACT_SETTING - - Benchmark benchmark( - options["concurrency"].as(), - options["delay"].as(), - options["host"].as(), - options["port"].as(), - options["database"].as(), - options["user"].as(), - options["password"].as(), - options["stage"].as(), - options["randomize"].as(), - options["iterations"].as(), - options["timelimit"].as(), - options["json"].as(), - ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings), - settings); - } - catch (...) 
- { - std::cerr << getCurrentExceptionMessage(print_stacktrace, true) << std::endl; - return getCurrentExceptionCode(); - } - - return 0; -} diff --git a/dbms/src/Server/CMakeLists.txt b/dbms/src/Server/CMakeLists.txt index 61ac2f626d5..7eeff6fcb83 100644 --- a/dbms/src/Server/CMakeLists.txt +++ b/dbms/src/Server/CMakeLists.txt @@ -20,10 +20,6 @@ option(ENABLE_CLICKHOUSE_ALL "Enable all tools" ON) option(ENABLE_CLICKHOUSE_SERVER "Enable server" ${ENABLE_CLICKHOUSE_ALL}) option(ENABLE_CLICKHOUSE_CLIENT "Enable client" ${ENABLE_CLICKHOUSE_ALL}) -option(ENABLE_CLICKHOUSE_LOCAL "Enable local" OFF) -option(ENABLE_CLICKHOUSE_BENCHMARK "Enable benchmark" OFF) -option(ENABLE_CLICKHOUSE_PERFORMANCE "Enable performance" OFF) -option(ENABLE_CLICKHOUSE_TOOLS "Enable tools: compressor format extract-from-config-lib" OFF) option(ENABLE_TIFLASH_DTTOOL "Enable dttool: tools to manage dmfile" ${ENABLE_CLICKHOUSE_ALL}) option(ENABLE_TIFLASH_DTWORKLOAD "Enable dtworkload: tools to test and stress DeltaTree" ${ENABLE_CLICKHOUSE_ALL}) @@ -48,30 +44,10 @@ target_link_libraries(clickhouse-server-lib PRIVATE ${TIFLASH_PROXY_LIBRARY}) target_link_libraries (clickhouse-server-lib PUBLIC clickhouse_common_io daemon clickhouse_storages_system clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions) target_include_directories (clickhouse-server-lib PUBLIC ${TiFlash_SOURCE_DIR}/libs/libdaemon/include) -add_library (clickhouse-local-lib LocalServer.cpp) -target_link_libraries (clickhouse-local-lib clickhouse-server-lib clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions) - -add_library (clickhouse-extract-from-config-lib ${SPLIT_SHARED} ExtractFromConfig.cpp) -target_link_libraries (clickhouse-extract-from-config-lib clickhouse_common_config clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY}) - add_library (clickhouse-client-lib Client.cpp) target_link_libraries (clickhouse-client-lib clickhouse_functions clickhouse_aggregate_functions ${LINE_EDITING_LIBS} ${Boost_PROGRAM_OPTIONS_LIBRARY}) target_include_directories (clickhouse-client-lib PRIVATE ${READLINE_INCLUDE_DIR}) -add_library (clickhouse-benchmark-lib ${SPLIT_SHARED} Benchmark.cpp) -target_link_libraries (clickhouse-benchmark-lib clickhouse-client-lib clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY}) -target_include_directories (clickhouse-benchmark-lib PRIVATE ${PCG_RANDOM_INCLUDE_DIR}) - -add_library (clickhouse-performance-test-lib ${SPLIT_SHARED} PerformanceTest.cpp) -target_link_libraries (clickhouse-performance-test-lib clickhouse_common_io dbms ${Boost_PROGRAM_OPTIONS_LIBRARY}) -target_include_directories (clickhouse-performance-test-lib PRIVATE ${PCG_RANDOM_INCLUDE_DIR}) - -add_library (clickhouse-compressor-lib ${SPLIT_SHARED} Compressor.cpp) -target_link_libraries (clickhouse-compressor-lib clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY}) - -add_library (clickhouse-format-lib ${SPLIT_SHARED} Format.cpp) -target_link_libraries (clickhouse-format-lib dbms clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY}) - add_library(tiflash-dttool-lib ${SPLIT_SHARED} DTTool/DTToolBench.cpp DTTool/DTToolMigrate.cpp DTTool/DTToolInspect.cpp) target_link_libraries(tiflash-dttool-lib PUBLIC dbms daemon clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY}) target_include_directories(tiflash-dttool-lib PUBLIC ${TiFlash_SOURCE_DIR}/libs/libdaemon/include) @@ -90,24 +66,11 @@ if (CLICKHOUSE_SPLIT_BINARY) target_link_libraries (clickhouse-server clickhouse-server-lib) add_executable (clickhouse-client 
clickhouse-client.cpp) target_link_libraries (clickhouse-client clickhouse-client-lib) - add_executable (clickhouse-local clickhouse-local.cpp) - target_link_libraries (clickhouse-local clickhouse-local-lib) - add_executable (clickhouse-benchmark clickhouse-benchmark.cpp) - target_link_libraries (clickhouse-benchmark clickhouse-benchmark-lib clickhouse_aggregate_functions) - add_executable (clickhouse-performance-test clickhouse-performance-test.cpp) - target_link_libraries (clickhouse-performance-test clickhouse-performance-test-lib dbms) - add_executable (clickhouse-extract-from-config clickhouse-extract-from-config.cpp) - target_link_libraries (clickhouse-extract-from-config clickhouse-extract-from-config-lib) # Also in utils - add_executable (clickhouse-compressor clickhouse-compressor.cpp) - target_link_libraries (clickhouse-compressor clickhouse-compressor-lib) - add_executable (clickhouse-format clickhouse-format.cpp) - target_link_libraries (clickhouse-format clickhouse-format-lib) add_executable (tiflash-dttool DTTool/tiflash-dttool.cpp) target_link_libraries (tiflash-dttool tiflash-dttool-entry-object) - set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-performance-test - clickhouse-extract-from-config clickhouse-format) + set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-extract-from-config clickhouse-format) if (USE_EMBEDDED_COMPILER) add_executable (clickhouse-clang clickhouse-clang.cpp) @@ -199,28 +162,12 @@ else () if (ENABLE_CLICKHOUSE_CLIENT) target_link_libraries (tiflash clickhouse-client-lib) endif () - if (ENABLE_CLICKHOUSE_LOCAL) - target_link_libraries (tiflash clickhouse-local-lib) - endif () - if (ENABLE_CLICKHOUSE_BENCHMARK) - target_link_libraries (tiflash clickhouse-benchmark-lib) - endif () - if (ENABLE_CLICKHOUSE_PERFORMANCE) - target_link_libraries (tiflash clickhouse-performance-test-lib) - endif () if (ENABLE_TIFLASH_DTTOOL) target_link_libraries(tiflash tiflash-dttool-entry-object) endif () if (ENABLE_TIFLASH_DTWORKLOAD) target_link_libraries(tiflash dt-workload-lib) endif () - if (ENABLE_CLICKHOUSE_TOOLS) - target_link_libraries (tiflash - clickhouse-extract-from-config-lib - clickhouse-compressor-lib - clickhouse-format-lib - ) - endif () set (CLICKHOUSE_BUNDLE) if (ENABLE_CLICKHOUSE_SERVER) @@ -233,37 +180,11 @@ else () install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT tiflash) list(APPEND CLICKHOUSE_BUNDLE clickhouse-client) endif () - if (ENABLE_CLICKHOUSE_LOCAL) - add_custom_target (clickhouse-local ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-local DEPENDS tiflash) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT tiflash) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-local) - endif () - if (ENABLE_CLICKHOUSE_BENCHMARK) - add_custom_target (clickhouse-benchmark ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-benchmark DEPENDS tiflash) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT tiflash) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-benchmark) - endif () - if (ENABLE_CLICKHOUSE_PERFORMANCE) - add_custom_target (clickhouse-performance-test ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-performance-test DEPENDS tiflash) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-performance-test DESTINATION ${CMAKE_INSTALL_BINDIR} 
COMPONENT tiflash) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-performance-test) - endif () if (ENABLE_TIFLASH_DTTOOL) add_custom_target (tiflash-dttool ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash tiflash-dttool DEPENDS tiflash) install (FILES ${CMAKE_CURRENT_BINARY_DIR}/tiflash-dttool DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT tiflash) list(APPEND CLICKHOUSE_BUNDLE tiflash-dttool) endif () - if (ENABLE_CLICKHOUSE_TOOLS) - add_custom_target (clickhouse-extract-from-config ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-extract-from-config DEPENDS tiflash) - add_custom_target (clickhouse-compressor ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-compressor DEPENDS tiflash) - add_custom_target (clickhouse-format ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-format DEPENDS tiflash) - install (FILES - ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config - ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor - ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format - DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT tiflash) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-extract-from-config clickhouse-compressor clickhouse-format) - endif () # install always because depian package want this files: add_custom_target (clickhouse-clang ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-clang DEPENDS tiflash) add_custom_target (clickhouse-lld ALL COMMAND ${CMAKE_COMMAND} -E create_symlink tiflash clickhouse-lld DEPENDS tiflash) diff --git a/dbms/src/Server/Compressor.cpp b/dbms/src/Server/Compressor.cpp deleted file mode 100644 index f8be6c466fb..00000000000 --- a/dbms/src/Server/Compressor.cpp +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - namespace ErrorCodes - { - extern const int TOO_LARGE_SIZE_COMPRESSED; - } -} - - -namespace -{ - -/// Outputs sizes of uncompressed and compressed blocks for compressed file. -void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) -{ - while (!in.eof()) - { - in.ignore(16); /// checksum - - char header[COMPRESSED_BLOCK_HEADER_SIZE]; - in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE); - - UInt32 size_compressed = unalignedLoad(&header[1]); - - if (size_compressed > DBMS_MAX_COMPRESSED_SIZE) - throw DB::Exception("Too large size_compressed. 
Most likely corrupted data.", DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED); - - UInt32 size_decompressed = unalignedLoad(&header[5]); - - DB::writeText(size_decompressed, out); - DB::writeChar('\t', out); - DB::writeText(size_compressed, out); - DB::writeChar('\n', out); - - in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE); - } -} - -} - - -int mainEntryClickHouseCompressor(int argc, char ** argv) -{ - boost::program_options::options_description desc("Allowed options"); - desc.add_options() - ("help,h", "produce help message") - ("decompress,d", "decompress") - ("block-size,b", boost::program_options::value()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size") - ("hc", "use LZ4HC instead of LZ4") - ("zstd", "use ZSTD instead of LZ4") - ("level", "compression level") - ("none", "use no compression instead of LZ4") - ("stat", "print block statistics of compressed data") - ; - - boost::program_options::variables_map options; - boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); - - if (options.count("help")) - { - std::cout << "Usage: " << argv[0] << " [options] < in > out" << std::endl; - std::cout << desc << std::endl; - return 1; - } - - try - { - bool decompress = options.count("decompress"); - bool use_lz4hc = options.count("hc"); - bool use_zstd = options.count("zstd"); - bool stat_mode = options.count("stat"); - bool use_none = options.count("none"); - unsigned block_size = options["block-size"].as(); - - DB::CompressionMethod method = DB::CompressionMethod::LZ4; - - if (use_lz4hc) - method = DB::CompressionMethod::LZ4HC; - else if (use_zstd) - method = DB::CompressionMethod::ZSTD; - else if (use_none) - method = DB::CompressionMethod::NONE; - - DB::CompressionSettings settings(method, options.count("level") > 0 ? options["level"].as() : DB::CompressionSettings::getDefaultLevel(method)); - - DB::ReadBufferFromFileDescriptor rb(STDIN_FILENO); - DB::WriteBufferFromFileDescriptor wb(STDOUT_FILENO); - - if (stat_mode) - { - /// Output statistic for compressed file. - checkAndWriteHeader(rb, wb); - } - else if (decompress) - { - /// Decompression - DB::CompressedReadBuffer from(rb); - DB::copyData(from, wb); - } - else - { - /// Compression - DB::CompressedWriteBuffer to(wb, settings, block_size); - DB::copyData(rb, to); - } - } - catch (...) - { - std::cerr << DB::getCurrentExceptionMessage(true); - return DB::getCurrentExceptionCode(); - } - - return 0; -} diff --git a/dbms/src/Server/ExtractFromConfig.cpp b/dbms/src/Server/ExtractFromConfig.cpp deleted file mode 100644 index 7ce29358227..00000000000 --- a/dbms/src/Server/ExtractFromConfig.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static void setupLogging(const std::string & log_level) -{ - Poco::AutoPtr channel(new Poco::ConsoleChannel); - Poco::AutoPtr formatter(new Poco::PatternFormatter); - formatter->setProperty("pattern", "%L%Y-%m-%d %H:%M:%S.%i <%p> %s: %t"); - Poco::AutoPtr formatting_channel(new Poco::FormattingChannel(formatter, channel)); - Poco::Logger::root().setChannel(formatting_channel); - Poco::Logger::root().setLevel(log_level); -} - -static std::string extractFromConfig( - const std::string & config_path, const std::string & key, bool try_get = false) -{ - ConfigProcessor processor(config_path, /* log_to_console = */ false); - auto config_conf = processor.processConfig(); - ConfigurationPtr configuration(new DB::TOMLConfiguration(config_conf)); - // do not throw exception if not found - if (try_get) - return configuration->getString(key, ""); - return configuration->getString(key); -} - -int mainEntryClickHouseExtractFromConfig(int argc, char ** argv) -{ - bool print_stacktrace = false; - bool try_get = false; - std::string log_level; - std::string config_path; - std::string key; - - namespace po = boost::program_options; - - po::options_description options_desc("Allowed options"); - options_desc.add_options() - ("help", "produce this help message") - ("stacktrace", po::bool_switch(&print_stacktrace), "print stack traces of exceptions") - ("try", po::bool_switch(&try_get), "Do not warn about missing keys") - ("log-level", po::value(&log_level)->default_value("error"), "log level") - ("config-file,c", po::value(&config_path)->required(), "path to config file") - ("key,k", po::value(&key)->required(), "key to get value for"); - - po::positional_options_description positional_desc; - positional_desc.add("config-file", 1); - positional_desc.add("key", 1); - - try - { - po::variables_map options; - po::store(po::command_line_parser(argc, argv).options(options_desc).positional(positional_desc).run(), options); - - if (options.count("help")) - { - std::cerr << "Preprocess config file and extract value of the given key." << std::endl - << std::endl; - std::cerr << "Usage: clickhouse extract-from-config [options]" << std::endl - << std::endl; - std::cerr << options_desc << std::endl; - return 0; - } - - po::notify(options); - - setupLogging(log_level); - std::cout << extractFromConfig(config_path, key, try_get) << std::endl; - } - catch (...) - { - std::cerr << DB::getCurrentExceptionMessage(print_stacktrace, true) << std::endl; - return DB::getCurrentExceptionCode(); - } - - return 0; -} diff --git a/dbms/src/Server/Format.cpp b/dbms/src/Server/Format.cpp deleted file mode 100644 index 9b70514a308..00000000000 --- a/dbms/src/Server/Format.cpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include -#include - -#include -#include -#include -#include -#include - - -int mainEntryClickHouseFormat(int argc, char ** argv) -{ - using namespace DB; - - boost::program_options::options_description desc("Allowed options"); - desc.add_options() - ("help,h", "produce help message") - ("hilite", "add syntax highlight with ANSI terminal escape sequences") - ("oneline", "format in single line") - ("quiet,q", "just check syntax, no output on success") - ; - - boost::program_options::variables_map options; - boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); - - if (options.count("help")) - { - std::cout << "Usage: " << argv[0] << " [options] < query" << std::endl; - std::cout << desc << std::endl; - return 1; - } - - try - { - bool hilite = options.count("hilite"); - bool oneline = options.count("oneline"); - bool quiet = options.count("quiet"); - - if (quiet && (hilite || oneline)) - { - std::cerr << "Options 'hilite' or 'oneline' have no sense in 'quiet' mode." << std::endl; - return 2; - } - - String query; - ReadBufferFromFileDescriptor in(STDIN_FILENO); - readStringUntilEOF(query, in); - - const char * pos = query.data(); - const char * end = pos + query.size(); - - ParserQuery parser(end); - ASTPtr res = parseQuery(parser, pos, end, "query", 0); - - if (!quiet) - { - formatAST(*res, std::cout, hilite, oneline); - std::cout << std::endl; - } - } - catch (...) - { - std::cerr << getCurrentExceptionMessage(true); - return getCurrentExceptionCode(); - } - - return 0; -} diff --git a/dbms/src/Server/LocalServer.cpp b/dbms/src/Server/LocalServer.cpp deleted file mode 100644 index f7abb352823..00000000000 --- a/dbms/src/Server/LocalServer.cpp +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "LocalServer.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "StatusFile.h" - - -namespace DB -{ -namespace ErrorCodes -{ -extern const int SYNTAX_ERROR; -extern const int CANNOT_LOAD_CONFIG; -} // namespace ErrorCodes - - -LocalServer::LocalServer() = default; - -LocalServer::~LocalServer() -{ - if (context) - context->shutdown(); /// required for properly exception handling -} - - -void LocalServer::initialize(Poco::Util::Application & self) -{ - Poco::Util::Application::initialize(self); - - // Turn off server logging to stderr - if (config().has("silent")) - { - Poco::Logger::root().setLevel("none"); - Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); - } -} - - -void LocalServer::defineOptions(Poco::Util::OptionSet & _options) -{ - Poco::Util::Application::defineOptions(_options); - - _options.addOption( - Poco::Util::Option("config-file", "", "Load configuration from a given file") - .required(false) - .repeatable(false) - .argument("[config.toml]") - .binding("config-file")); - - /// Arguments that define first query creating initial table: - /// (If structure argument is omitted then initial query is not generated) - _options.addOption( - Poco::Util::Option("structure", "S", "Structure of initial table(list columns names with their types)") - .required(false) - .repeatable(false) - .argument("[name Type]") - .binding("table-structure")); - - /// Turn off logging - _options.addOption( - Poco::Util::Option("silent", "s", "Quiet mode, print only errors") - .required(false) - .repeatable(false) - .binding("silent")); - - _options.addOption( - Poco::Util::Option("table", "N", "Name of initial table") - .required(false) - .repeatable(false) - .argument("[table]") - .binding("table-name")); - - _options.addOption( - Poco::Util::Option("file", "f", "Path to file with data of initial table (stdin if not specified)") - .required(false) - .repeatable(false) - .argument(" stdin") - .binding("table-file")); - - _options.addOption( - Poco::Util::Option("input-format", "if", "Input format of initial table data") - .required(false) - .repeatable(false) - .argument("") - .binding("table-data-format")); - - /// List of queries to execute - _options.addOption( - Poco::Util::Option("query", "q", "Queries to execute") - .required(false) - .repeatable(false) - .argument("") - .binding("query")); - - /// Default Output format - _options.addOption( - Poco::Util::Option("output-format", "of", "Default output format") - .required(false) - .repeatable(false) - .argument("[TSV]", true) - .binding("output-format")); - - /// Alias for previous one, required for clickhouse-client compatibility - _options.addOption( - Poco::Util::Option("format", "", "Default output format") - .required(false) - .repeatable(false) - .argument("[TSV]", true) - .binding("format")); - - _options.addOption( - Poco::Util::Option("stacktrace", "", "Print stack traces of exceptions") - .required(false) - .repeatable(false) - .binding("stacktrace")); - - _options.addOption( - Poco::Util::Option("verbose", "", "Print info about execution of queries") - .required(false) - .repeatable(false) - .noArgument() - .binding("verbose")); - - _options.addOption( - Poco::Util::Option("help", "", "Display help information") - .required(false) - .repeatable(false) - .noArgument() - 
.binding("help") - .callback(Poco::Util::OptionCallback(this, &LocalServer::handleHelp))); - - /// These arrays prevent "variable tracking size limit exceeded" compiler notice. - static const char * settings_names[] = { -#define DECLARE_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) #NAME, - APPLY_FOR_SETTINGS(DECLARE_SETTING) -#undef DECLARE_SETTING - nullptr}; - - for (const char ** name = settings_names; *name; ++name) - _options.addOption(Poco::Util::Option(*name, "", "Settings.h").required(false).argument("").repeatable(false).binding(*name)); -} - - -void LocalServer::applyOptions() -{ - context->setDefaultFormat(config().getString("output-format", config().getString("format", "TSV"))); - - /// settings and limits could be specified in config file, but passed settings has higher priority -#define EXTRACT_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) \ - if (config().has(#NAME) && !context->getSettingsRef().NAME.changed) \ - context->setSetting(#NAME, config().getString(#NAME)); - APPLY_FOR_SETTINGS(EXTRACT_SETTING) -#undef EXTRACT_SETTING -} - - -void LocalServer::displayHelp() -{ - Poco::Util::HelpFormatter helpFormatter(options()); - helpFormatter.setCommand(commandName()); - helpFormatter.setUsage("[initial table definition] [--query ]"); - helpFormatter.setHeader("\n" - "clickhouse-local allows to execute SQL queries on your data files via single command line call.\n" - "To do so, intially you need to define your data source and its format.\n" - "After you can execute your SQL queries in the usual manner.\n" - "There are two ways to define initial table keeping your data:\n" - "either just in first query like this:\n" - " CREATE TABLE () ENGINE = File(, );\n" - "either through corresponding command line parameters."); - helpFormatter.setWidth(132); /// 80 is ugly due to wide settings params - - helpFormatter.format(std::cerr); - std::cerr << "Example printing memory used by each Unix user:\n" - "ps aux | tail -n +2 | awk '{ printf(\"%s\\t%s\\n\", $1, $4) }' | " - "clickhouse-local -S \"user String, mem Float64\" -q \"SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty\"\n"; -} - - -void LocalServer::handleHelp(const std::string & /*name*/, const std::string & /*value*/) -{ - displayHelp(); - stopOptionsProcessing(); -} - - -/// If path is specified and not empty, will try to setup server environment and load existing metadata -void LocalServer::tryInitPath() -{ - if (!config().has("path") || (path = config().getString("path")).empty()) - return; - - Poco::trimInPlace(path); - if (path.empty()) - return; - if (path.back() != '/') - path += '/'; - - context->setPath(path); - - StatusFile status{path + "status"}; -} - - -int LocalServer::main(const std::vector & /*args*/) -try -{ - Poco::Logger * log = &logger(); - - if (!config().has("query") && !config().has("table-structure")) /// Nothing to process - { - if (!config().hasOption("help")) - { - std::cerr << "There are no queries to process." 
<< std::endl; - displayHelp(); - } - - return Application::EXIT_OK; - } - - /// Load config files if exists - if (config().has("config-file") || Poco::File("config.toml").exists()) - { - ConfigProcessor config_processor(config().getString("config-file", "config.toml"), true); - auto loaded_config = config_processor.loadConfig(); - config_processor.savePreprocessedConfig(loaded_config); - config().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false); - } - - context = std::make_unique(Context::createGlobal()); - context->setGlobalContext(*context); - context->setApplicationType(Context::ApplicationType::LOCAL); - tryInitPath(); - - applyOptions(); - - /// Skip temp path installation - - /// We will terminate process on error - static KillingErrorHandler error_handler; - Poco::ErrorHandler::set(&error_handler); - - /// Don't initilaize DateLUT - - registerFunctions(); - registerAggregateFunctions(); - registerTableFunctions(); - registerStorages(); - - /// Maybe useless - if (config().has("macros")) - context->setMacros(std::make_unique(config(), "macros")); - - /// Skip networking - - setupUsers(); - - /// Limit on total number of concurrently executing queries. - /// Threre are no need for concurrent threads, override max_concurrent_queries. - context->getProcessList().setMaxSize(0); - - /// Size of cache for uncompressed blocks. Zero means disabled. - size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0); - if (uncompressed_cache_size) - context->setUncompressedCache(uncompressed_cache_size); - - /// Size of cache for marks (index of MergeTree family of tables). It is necessary. - /// Specify default value for mark_cache_size explicitly! - size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120); - if (mark_cache_size) - context->setMarkCache(mark_cache_size); - - /// Size of cache for minmax index, used by DeltaMerge engine. - size_t minmax_index_cache_size = config().has("minmax_index_cache_size") ? config().getUInt64("minmax_index_cache_size") : mark_cache_size; - if (minmax_index_cache_size) - context->setMinMaxIndexCache(minmax_index_cache_size); - - bool use_L0_opt = config().getBool("l0_optimize", true); - context->setUseL0Opt(use_L0_opt); - - /// Load global settings from default_profile and system_profile. - context->setDefaultProfiles(config()); - - /** Init dummy default DB - * NOTE: We force using isolated default database to avoid conflicts with default database from server enviroment - * Otherwise, metadata of temporary File(format, EXPLICIT_PATH) tables will pollute metadata/ directory; - * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. 
- */ - const std::string default_database = "_local"; - context->addDatabase(default_database, std::make_shared(default_database)); - context->setCurrentDatabase(default_database); - - if (!path.empty()) - { - LOG_FMT_DEBUG(log, "Loading metadata from {}", path); - loadMetadataSystem(*context); - attachSystemTables(); - loadMetadata(*context); - LOG_FMT_DEBUG(log, "Loaded metadata."); - } - else - { - attachSystemTables(); - } - - processQueries(); - - context->shutdown(); - context.reset(); - - return Application::EXIT_OK; -} -catch (const Exception & e) -{ - bool print_stack_trace = config().has("stacktrace"); - - std::string text = e.displayText(); - - auto embedded_stack_trace_pos = text.find("Stack trace"); - if (std::string::npos != embedded_stack_trace_pos && !print_stack_trace) - text.resize(embedded_stack_trace_pos); - - std::cerr << "Code: " << e.code() << ". " << text << std::endl - << std::endl; - - if (print_stack_trace && std::string::npos == embedded_stack_trace_pos) - { - std::cerr << "Stack trace:" << std::endl - << e.getStackTrace().toString(); - } - - /// If exception code isn't zero, we should return non-zero return code anyway. - return e.code() ? e.code() : -1; -} - - -inline String getQuotedString(const String & s) -{ - WriteBufferFromOwnString buf; - writeQuotedString(s, buf); - return buf.str(); -} - - -std::string LocalServer::getInitialCreateTableQuery() -{ - if (!config().has("table-structure")) - return {}; - - auto table_name = backQuoteIfNeed(config().getString("table-name", "table")); - auto table_structure = config().getString("table-structure"); - auto data_format = backQuoteIfNeed(config().getString("table-data-format", "TSV")); - String table_file; - if (!config().has("table-file") || config().getString("table-file") == "-") /// Use Unix tools stdin naming convention - table_file = "stdin"; - else /// Use regular file - table_file = getQuotedString(config().getString("table-file")); - - return "CREATE TABLE " + table_name + " (" + table_structure + ") " + "ENGINE = " - "File(" - + data_format + ", " + table_file + ")" - "; "; -} - - -void LocalServer::attachSystemTables() -{ - DatabasePtr system_database = context->tryGetDatabase("system"); - if (!system_database) - { - /// TODO: add attachTableDelayed into DatabaseMemory to speedup loading - system_database = std::make_shared("system"); - context->addDatabase("system", system_database); - } - - attachSystemTablesLocal(*system_database); -} - - -void LocalServer::processQueries() -{ - Poco::Logger * log = &logger(); - - String initial_create_query = getInitialCreateTableQuery(); - String queries_str = initial_create_query + config().getString("query"); - - bool verbose = config().hasOption("verbose"); - - std::vector queries; - auto parse_res = splitMultipartQuery(queries_str, queries); - - if (!parse_res.second) - throw Exception("Cannot parse and execute the following part of query: " + String(parse_res.first), ErrorCodes::SYNTAX_ERROR); - - context->setUser("default", "", Poco::Net::SocketAddress{}, ""); - context->setCurrentQueryId(""); - - for (const auto & query : queries) - { - ReadBufferFromString read_buf(query); - WriteBufferFromFileDescriptor write_buf(STDOUT_FILENO); - - if (verbose) - LOG_FMT_INFO(log, "Executing query: {}", query); - - executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, *context, {}); - } -} - -static const char * minimal_default_user_xml = "" - " " - " " - " " - " " - " " - " " - " " - " ::/0" - " " - " default" - " default" - " " - " " - " " - " " - " " - 
""; - - -static ConfigurationPtr getConfigurationFromXMLString(const char * xml_data) -{ - std::stringstream ss{std::string{xml_data}}; - Poco::XML::InputSource input_source{ss}; - return {new Poco::Util::XMLConfiguration{&input_source}}; -} - - -void LocalServer::setupUsers() -{ - ConfigurationPtr users_config; - - if (config().has("users_config") || config().has("config-file") || Poco::File("config.toml").exists()) - { - const auto users_config_path = config().getString("users_config", config().getString("config-file", "config.toml")); - ConfigProcessor config_processor(users_config_path); - const auto loaded_config = config_processor.loadConfig(); - config_processor.savePreprocessedConfig(loaded_config); - users_config = loaded_config.configuration; - } - else - { - users_config = getConfigurationFromXMLString(minimal_default_user_xml); - } - - if (users_config) - context->setUsersConfig(users_config); - else - throw Exception("Can't load config for users", ErrorCodes::CANNOT_LOAD_CONFIG); -} - -} // namespace DB - -int mainEntryClickHouseLocal(int argc, char ** argv) -{ - DB::LocalServer app; - try - { - app.init(argc, argv); - return app.run(); - } - catch (...) - { - std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; - auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; - } -} diff --git a/dbms/src/Server/LocalServer.h b/dbms/src/Server/LocalServer.h deleted file mode 100644 index 89a4e61b857..00000000000 --- a/dbms/src/Server/LocalServer.h +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include - -namespace DB -{ - -class Context; - -/// Lightweight Application for clickhouse-local -/// No networking, no extra configs and working directories, no pid and status files, no dictionaries, no logging. -/// Quiet mode by default -class LocalServer : public Poco::Util::Application -{ -public: - - LocalServer(); - - void initialize(Poco::Util::Application & self) override; - - void defineOptions(Poco::Util::OptionSet& _options) override; - - int main(const std::vector & args) override; - - ~LocalServer(); - -private: - - /** Composes CREATE subquery based on passed arguments (--structure --file --table and --input-format) - * This query will be executed first, before queries passed through --query argument - * Returns empty string if it cannot compose that query. - */ - std::string getInitialCreateTableQuery(); - - void tryInitPath(); - void applyOptions(); - void attachSystemTables(); - void processQueries(); - void setupUsers(); - void displayHelp(); - void handleHelp(const std::string & name, const std::string & value); - -protected: - - std::unique_ptr context; - std::string path; -}; - -} diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp deleted file mode 100644 index 05c99ca8847..00000000000 --- a/dbms/src/Server/PerformanceTest.cpp +++ /dev/null @@ -1,1522 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "InterruptListener.h" - -/** Tests launcher for ClickHouse. - * The tool walks through given or default folder in order to find files with - * tests' descriptions and launches it. - */ -namespace fs = boost::filesystem; -using String = std::string; -const String FOUR_SPACES = " "; - -namespace DB -{ -namespace ErrorCodes -{ - extern const int POCO_EXCEPTION; - extern const int STD_EXCEPTION; - extern const int UNKNOWN_EXCEPTION; - extern const int NOT_IMPLEMENTED; -} - -static String pad(size_t padding) -{ - return String(padding * 4, ' '); -} - - -/// NOTE The code is totally wrong. -class JSONString -{ -private: - std::map content; - size_t padding; - -public: - explicit JSONString(size_t padding_ = 1) : padding(padding_){}; - - void set(const String key, String value, bool wrap = true) - { - if (value.empty()) - value = "null"; - - bool reserved = (value[0] == '[' || value[0] == '{' || value == "null"); - if (!reserved && wrap) - value = '"' + value + '"'; - - content[key] = value; - } - - template - std::enable_if_t> set(const String key, T value) - { - set(key, std::to_string(value), /*wrap= */ false); - } - - void set(const String key, const std::vector & run_infos) - { - String value = "[\n"; - - for (size_t i = 0; i < run_infos.size(); ++i) - { - value += pad(padding + 1) + run_infos[i].asString(padding + 2); - if (i != run_infos.size() - 1) - value += ','; - - value += "\n"; - } - - value += pad(padding) + ']'; - content[key] = value; - } - - String asString() const - { - return asString(padding); - } - String asString(size_t padding) const - { - String repr = "{"; - - for (auto it = content.begin(); it != content.end(); ++it) - { - if (it != content.begin()) - repr += ','; - /// construct "key": "value" string with padding - repr += "\n" + pad(padding) + '"' + it->first + '"' + ": " + it->second; - } - - repr += "\n" + pad(padding - 1) + '}'; - return repr; - } -}; - - -using ConfigurationPtr = Poco::AutoPtr; - -/// A set of supported stop conditions. 
-struct StopConditionsSet -{ - void loadFromConfig(const ConfigurationPtr & stop_conditions_view) - { - using Keys = std::vector; - Keys keys; - stop_conditions_view->keys(keys); - - for (const String & key : keys) - { - if (key == "total_time_ms") - total_time_ms.value = stop_conditions_view->getUInt64(key); - else if (key == "rows_read") - rows_read.value = stop_conditions_view->getUInt64(key); - else if (key == "bytes_read_uncompressed") - bytes_read_uncompressed.value = stop_conditions_view->getUInt64(key); - else if (key == "iterations") - iterations.value = stop_conditions_view->getUInt64(key); - else if (key == "min_time_not_changing_for_ms") - min_time_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); - else if (key == "max_speed_not_changing_for_ms") - max_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); - else if (key == "average_speed_not_changing_for_ms") - average_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); - else - throw DB::Exception("Met unkown stop condition: " + key); - - ++initialized_count; - } - } - - void reset() - { - total_time_ms.fulfilled = false; - rows_read.fulfilled = false; - bytes_read_uncompressed.fulfilled = false; - iterations.fulfilled = false; - min_time_not_changing_for_ms.fulfilled = false; - max_speed_not_changing_for_ms.fulfilled = false; - average_speed_not_changing_for_ms.fulfilled = false; - - fulfilled_count = 0; - } - - /// Note: only conditions with UInt64 minimal thresholds are supported. - /// I.e. condition is fulfilled when value is exceeded. - struct StopCondition - { - UInt64 value = 0; - bool fulfilled = false; - }; - - void report(UInt64 value, StopCondition & condition) - { - if (condition.value && !condition.fulfilled && value >= condition.value) - { - condition.fulfilled = true; - ++fulfilled_count; - } - } - - StopCondition total_time_ms; - StopCondition rows_read; - StopCondition bytes_read_uncompressed; - StopCondition iterations; - StopCondition min_time_not_changing_for_ms; - StopCondition max_speed_not_changing_for_ms; - StopCondition average_speed_not_changing_for_ms; - - size_t initialized_count = 0; - size_t fulfilled_count = 0; -}; - -/// Stop conditions for a test run. The running test will be terminated in either of two conditions: -/// 1. All conditions marked 'all_of' are fulfilled -/// or -/// 2. 
Any condition marked 'any_of' is fulfilled -class TestStopConditions -{ -public: - void loadFromConfig(ConfigurationPtr & stop_conditions_config) - { - if (stop_conditions_config->has("all_of")) - { - ConfigurationPtr config_all_of(stop_conditions_config->createView("all_of")); - conditions_all_of.loadFromConfig(config_all_of); - } - if (stop_conditions_config->has("any_of")) - { - ConfigurationPtr config_any_of(stop_conditions_config->createView("any_of")); - conditions_any_of.loadFromConfig(config_any_of); - } - } - - bool empty() const - { - return !conditions_all_of.initialized_count && !conditions_any_of.initialized_count; - } - -#define DEFINE_REPORT_FUNC(FUNC_NAME, CONDITION) \ - void FUNC_NAME(UInt64 value) \ - { \ - conditions_all_of.report(value, conditions_all_of.CONDITION); \ - conditions_any_of.report(value, conditions_any_of.CONDITION); \ - } - - DEFINE_REPORT_FUNC(reportTotalTime, total_time_ms); - DEFINE_REPORT_FUNC(reportRowsRead, rows_read); - DEFINE_REPORT_FUNC(reportBytesReadUncompressed, bytes_read_uncompressed); - DEFINE_REPORT_FUNC(reportIterations, iterations); - DEFINE_REPORT_FUNC(reportMinTimeNotChangingFor, min_time_not_changing_for_ms); - DEFINE_REPORT_FUNC(reportMaxSpeedNotChangingFor, max_speed_not_changing_for_ms); - DEFINE_REPORT_FUNC(reportAverageSpeedNotChangingFor, average_speed_not_changing_for_ms); - -#undef REPORT - - bool areFulfilled() const - { - return (conditions_all_of.initialized_count && conditions_all_of.fulfilled_count >= conditions_all_of.initialized_count) - || (conditions_any_of.initialized_count && conditions_any_of.fulfilled_count); - } - - void reset() - { - conditions_all_of.reset(); - conditions_any_of.reset(); - } - -private: - StopConditionsSet conditions_all_of; - StopConditionsSet conditions_any_of; -}; - -struct Stats -{ - Stopwatch watch; - Stopwatch watch_per_query; - Stopwatch min_time_watch; - Stopwatch max_rows_speed_watch; - Stopwatch max_bytes_speed_watch; - Stopwatch avg_rows_speed_watch; - Stopwatch avg_bytes_speed_watch; - - bool last_query_was_cancelled = false; - - size_t queries = 0; - - size_t total_rows_read = 0; - size_t total_bytes_read = 0; - - size_t last_query_rows_read = 0; - size_t last_query_bytes_read = 0; - - using Sampler = ReservoirSampler; - Sampler sampler{1 << 16}; - - /// min_time in ms - UInt64 min_time = std::numeric_limits::max(); - double total_time = 0; - - double max_rows_speed = 0; - double max_bytes_speed = 0; - - double avg_rows_speed_value = 0; - double avg_rows_speed_first = 0; - static double avg_rows_speed_precision; - - double avg_bytes_speed_value = 0; - double avg_bytes_speed_first = 0; - static double avg_bytes_speed_precision; - - size_t number_of_rows_speed_info_batches = 0; - size_t number_of_bytes_speed_info_batches = 0; - - bool ready = false; // check if a query wasn't interrupted by SIGINT - String exception; - - String getStatisticByName(const String & statistic_name) - { - if (statistic_name == "min_time") - { - return std::to_string(min_time) + "ms"; - } - if (statistic_name == "quantiles") - { - String result = "\n"; - - for (double percent = 10; percent <= 90; percent += 10) - { - result += FOUR_SPACES + std::to_string((percent / 100)); - result += ": " + std::to_string(sampler.quantileInterpolated(percent / 100.0)); - result += "\n"; - } - result += FOUR_SPACES + "0.95: " + std::to_string(sampler.quantileInterpolated(95 / 100.0)) + "\n"; - result += FOUR_SPACES + "0.99: " + std::to_string(sampler.quantileInterpolated(99 / 100.0)) + "\n"; - result += FOUR_SPACES + 
"0.999: " + std::to_string(sampler.quantileInterpolated(99.9 / 100.)) + "\n"; - result += FOUR_SPACES + "0.9999: " + std::to_string(sampler.quantileInterpolated(99.99 / 100.)); - - return result; - } - if (statistic_name == "total_time") - { - return std::to_string(total_time) + "s"; - } - if (statistic_name == "queries_per_second") - { - return std::to_string(queries / total_time); - } - if (statistic_name == "rows_per_second") - { - return std::to_string(total_rows_read / total_time); - } - if (statistic_name == "bytes_per_second") - { - return std::to_string(total_bytes_read / total_time); - } - - if (statistic_name == "max_rows_per_second") - { - return std::to_string(max_rows_speed); - } - if (statistic_name == "max_bytes_per_second") - { - return std::to_string(max_bytes_speed); - } - if (statistic_name == "avg_rows_per_second") - { - return std::to_string(avg_rows_speed_value); - } - if (statistic_name == "avg_bytes_per_second") - { - return std::to_string(avg_bytes_speed_value); - } - - return ""; - } - - void update_min_time(const UInt64 min_time_candidate) - { - if (min_time_candidate < min_time) - { - min_time = min_time_candidate; - min_time_watch.restart(); - } - } - - void update_average_speed(const double new_speed_info, - Stopwatch & avg_speed_watch, - size_t & number_of_info_batches, - double precision, - double & avg_speed_first, - double & avg_speed_value) - { - avg_speed_value = ((avg_speed_value * number_of_info_batches) + new_speed_info); - ++number_of_info_batches; - avg_speed_value /= number_of_info_batches; - - if (avg_speed_first == 0) - { - avg_speed_first = avg_speed_value; - } - - if (std::abs(avg_speed_value - avg_speed_first) >= precision) - { - avg_speed_first = avg_speed_value; - avg_speed_watch.restart(); - } - } - - void update_max_speed(const size_t max_speed_candidate, Stopwatch & max_speed_watch, double & max_speed) - { - if (max_speed_candidate > max_speed) - { - max_speed = max_speed_candidate; - max_speed_watch.restart(); - } - } - - void add(size_t rows_read_inc, size_t bytes_read_inc) - { - total_rows_read += rows_read_inc; - total_bytes_read += bytes_read_inc; - last_query_rows_read += rows_read_inc; - last_query_bytes_read += bytes_read_inc; - - double new_rows_speed = last_query_rows_read / watch_per_query.elapsedSeconds(); - double new_bytes_speed = last_query_bytes_read / watch_per_query.elapsedSeconds(); - - /// Update rows speed - update_max_speed(new_rows_speed, max_rows_speed_watch, max_rows_speed); - update_average_speed(new_rows_speed, - avg_rows_speed_watch, - number_of_rows_speed_info_batches, - avg_rows_speed_precision, - avg_rows_speed_first, - avg_rows_speed_value); - /// Update bytes speed - update_max_speed(new_bytes_speed, max_bytes_speed_watch, max_bytes_speed); - update_average_speed(new_bytes_speed, - avg_bytes_speed_watch, - number_of_bytes_speed_info_batches, - avg_bytes_speed_precision, - avg_bytes_speed_first, - avg_bytes_speed_value); - } - - void updateQueryInfo() - { - ++queries; - sampler.insert(watch_per_query.elapsedSeconds()); - update_min_time(watch_per_query.elapsed() / (1000 * 1000)); /// ns to ms - } - - void setTotalTime() - { - total_time = watch.elapsedSeconds(); - } - - void clear() - { - watch.restart(); - watch_per_query.restart(); - min_time_watch.restart(); - max_rows_speed_watch.restart(); - max_bytes_speed_watch.restart(); - avg_rows_speed_watch.restart(); - avg_bytes_speed_watch.restart(); - - last_query_was_cancelled = false; - - sampler.clear(); - - queries = 0; - total_rows_read = 0; - 
total_bytes_read = 0; - last_query_rows_read = 0; - last_query_bytes_read = 0; - - min_time = std::numeric_limits::max(); - total_time = 0; - max_rows_speed = 0; - max_bytes_speed = 0; - avg_rows_speed_value = 0; - avg_bytes_speed_value = 0; - avg_rows_speed_first = 0; - avg_bytes_speed_first = 0; - avg_rows_speed_precision = 0.001; - avg_bytes_speed_precision = 0.001; - number_of_rows_speed_info_batches = 0; - number_of_bytes_speed_info_batches = 0; - } -}; - -double Stats::avg_rows_speed_precision = 0.001; -double Stats::avg_bytes_speed_precision = 0.001; - -class PerformanceTest -{ -public: - using Strings = std::vector; - - PerformanceTest(const String & host_, - const UInt16 port_, - const String & default_database_, - const String & user_, - const String & password_, - const bool & lite_output_, - const String & profiles_file_, - Strings && input_files_, - Strings && tests_tags_, - Strings && skip_tags_, - Strings && tests_names_, - Strings && skip_names_, - Strings && tests_names_regexp_, - Strings && skip_names_regexp_, - const ConnectionTimeouts & timeouts) - : connection(host_, port_, default_database_, user_, password_, timeouts), - gotSIGINT(false), - lite_output(lite_output_), - profiles_file(profiles_file_), - input_files(input_files_), - tests_tags(std::move(tests_tags_)), - skip_tags(std::move(skip_tags_)), - tests_names(std::move(tests_names_)), - skip_names(std::move(skip_names_)), - tests_names_regexp(std::move(tests_names_regexp_)), - skip_names_regexp(std::move(skip_names_regexp_)) - { - if (input_files.size() < 1) - { - throw DB::Exception("No tests were specified", 0); - } - - std::string name; - UInt64 version_major; - UInt64 version_minor; - UInt64 version_revision; - connection.getServerVersion(name, version_major, version_minor, version_revision); - - std::stringstream ss; - ss << version_major << "." << version_minor << "." << version_revision; - server_version = ss.str(); - - processTestsConfigurations(input_files); - } - -private: - String test_name; - - using Query = String; - using Queries = std::vector; - using QueriesWithIndexes = std::vector>; - Queries queries; - - Connection connection; - std::string server_version; - - using Keys = std::vector; - - Settings settings; - Context global_context = Context::createGlobal(); - - InterruptListener interrupt_listener; - - using XMLConfiguration = Poco::Util::XMLConfiguration; - using XMLConfigurationPtr = Poco::AutoPtr; - - using Paths = std::vector; - using StringToVector = std::map>; - StringToVector substitutions; - - using StringKeyValue = std::map; - std::vector substitutions_maps; - - bool gotSIGINT; - std::vector stop_conditions_by_run; - String main_metric; - bool lite_output; - String profiles_file; - - Strings input_files; - std::vector tests_configurations; - - Strings tests_tags; - Strings skip_tags; - Strings tests_names; - Strings skip_names; - Strings tests_names_regexp; - Strings skip_names_regexp; - - enum class ExecutionType - { - Loop, - Once - }; - ExecutionType exec_type; - - enum class FilterType - { - Tag, - Name, - Name_regexp - }; - - size_t times_to_run = 1; - std::vector statistics_by_run; - - /// Removes configurations that has a given value. If leave is true, the logic is reversed. 
-    void removeConfigurationsIf(
-        std::vector<XMLConfigurationPtr> & configs, FilterType filter_type, const Strings & values, bool leave = false)
-    {
-        auto checker = [&filter_type, &values, &leave](XMLConfigurationPtr & config)
-        {
-            if (values.size() == 0)
-                return false;
-
-            bool remove_or_not = false;
-
-            if (filter_type == FilterType::Tag)
-            {
-                Keys tags_keys;
-                config->keys("tags", tags_keys);
-
-                Strings tags(tags_keys.size());
-                for (size_t i = 0; i != tags_keys.size(); ++i)
-                    tags[i] = config->getString("tags.tag[" + std::to_string(i) + "]");
-
-                for (const String & config_tag : tags)
-                {
-                    if (std::find(values.begin(), values.end(), config_tag) != values.end())
-                        remove_or_not = true;
-                }
-            }
-
-            if (filter_type == FilterType::Name)
-            {
-                remove_or_not = (std::find(values.begin(), values.end(), config->getString("name", "")) != values.end());
-            }
-
-            if (filter_type == FilterType::Name_regexp)
-            {
-                String config_name = config->getString("name", "");
-                auto regex_checker = [&config_name](const String & name_regexp)
-                {
-                    std::regex pattern(name_regexp);
-                    return std::regex_search(config_name, pattern);
-                };
-
-                remove_or_not = config->has("name") ? (std::find_if(values.begin(), values.end(), regex_checker) != values.end()) : false;
-            }
-
-            if (leave)
-                remove_or_not = !remove_or_not;
-            return remove_or_not;
-        };
-
-        auto new_end = std::remove_if(configs.begin(), configs.end(), checker);
-        configs.erase(new_end, configs.end());
-    }
-
-    /// Filter tests by tags, names, regexp matching, etc.
-    void filterConfigurations()
-    {
-        /// Leave tests:
-        removeConfigurationsIf(tests_configurations, FilterType::Tag, tests_tags, true);
-        removeConfigurationsIf(tests_configurations, FilterType::Name, tests_names, true);
-        removeConfigurationsIf(tests_configurations, FilterType::Name_regexp, tests_names_regexp, true);
-
-
-        /// Skip tests
-        removeConfigurationsIf(tests_configurations, FilterType::Tag, skip_tags, false);
-        removeConfigurationsIf(tests_configurations, FilterType::Name, skip_names, false);
-        removeConfigurationsIf(tests_configurations, FilterType::Name_regexp, skip_names_regexp, false);
-    }
-
-    /// Checks specified preconditions per test (process cache, table existence, etc.)
- bool checkPreconditions(const XMLConfigurationPtr & config) - { - if (!config->has("preconditions")) - return true; - - Keys preconditions; - config->keys("preconditions", preconditions); - size_t table_precondition_index = 0; - - for (const String & precondition : preconditions) - { - if (precondition == "flush_disk_cache") - { - if (system( - "(>&2 echo 'Flushing disk cache...') && (sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches') && (>&2 echo 'Flushed.')")) - { - std::cerr << "Failed to flush disk cache" << std::endl; - return false; - } - } - - if (precondition == "ram_size") - { - size_t ram_size_needed = config->getUInt64("preconditions.ram_size"); - size_t actual_ram = getMemoryAmount(); - if (!actual_ram) - throw DB::Exception("ram_size precondition not available on this platform", ErrorCodes::NOT_IMPLEMENTED); - - if (ram_size_needed > actual_ram) - { - std::cerr << "Not enough RAM: need = " << ram_size_needed << ", present = " << actual_ram << std::endl; - return false; - } - } - - if (precondition == "table_exists") - { - String precondition_key = "preconditions.table_exists[" + std::to_string(table_precondition_index++) + "]"; - String table_to_check = config->getString(precondition_key); - String query = "EXISTS TABLE " + table_to_check + ";"; - - size_t exist = 0; - - connection.sendQuery(query, "", QueryProcessingStage::Complete, &settings, nullptr, false); - - while (true) - { - Connection::Packet packet = connection.receivePacket(); - - if (packet.type == Protocol::Server::Data) - { - for (const ColumnWithTypeAndName & column : packet.block) - { - if (column.name == "result" && column.column->size() > 0) - { - exist = column.column->get64(0); - if (exist) - break; - } - } - } - - if (packet.type == Protocol::Server::Exception || packet.type == Protocol::Server::EndOfStream) - break; - } - - if (!exist) - { - std::cerr << "Table " << table_to_check << " doesn't exist" << std::endl; - return false; - } - } - } - - return true; - } - - void processTestsConfigurations(const Paths & input_files) - { - tests_configurations.resize(input_files.size()); - - for (size_t i = 0; i != input_files.size(); ++i) - { - const String path = input_files[i]; - tests_configurations[i] = XMLConfigurationPtr(new XMLConfiguration(path)); - } - - filterConfigurations(); - - if (tests_configurations.size()) - { - Strings outputs; - - for (auto & test_config : tests_configurations) - { - if (!checkPreconditions(test_config)) - { - std::cerr << "Preconditions are not fulfilled for test '" + test_config->getString("name", "") + "' "; - continue; - } - - String output = runTest(test_config); - if (lite_output) - std::cout << output; - else - outputs.push_back(output); - } - - if (!lite_output && outputs.size()) - { - std::cout << "[" << std::endl; - - for (size_t i = 0; i != outputs.size(); ++i) - { - std::cout << outputs[i]; - if (i != outputs.size() - 1) - std::cout << ","; - - std::cout << std::endl; - } - - std::cout << "]" << std::endl; - } - } - } - - void extractSettings( - const XMLConfigurationPtr & config, const String & key, const Strings & settings_list, std::map & settings_to_apply) - { - for (const String & setup : settings_list) - { - if (setup == "profile") - continue; - - String value = config->getString(key + "." 
+ setup); - if (value.empty()) - value = "true"; - - settings_to_apply[setup] = value; - } - } - - String runTest(XMLConfigurationPtr & test_config) - { - queries.clear(); - - test_name = test_config->getString("name"); - std::cerr << "Running: " << test_name << "\n"; - - if (test_config->has("settings")) - { - std::map settings_to_apply; - Keys config_settings; - test_config->keys("settings", config_settings); - - /// Preprocess configuration file - if (std::find(config_settings.begin(), config_settings.end(), "profile") != config_settings.end()) - { - if (!profiles_file.empty()) - { - String profile_name = test_config->getString("settings.profile"); - XMLConfigurationPtr profiles_config(new XMLConfiguration(profiles_file)); - - Keys profile_settings; - profiles_config->keys("profiles." + profile_name, profile_settings); - - extractSettings(profiles_config, "profiles." + profile_name, profile_settings, settings_to_apply); - } - } - - extractSettings(test_config, "settings", config_settings, settings_to_apply); - - /// This macro goes through all settings in the Settings.h - /// and, if found any settings in test's xml configuration - /// with the same name, sets its value to settings - std::map::iterator it; -#define EXTRACT_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) \ - it = settings_to_apply.find(#NAME); \ - if (it != settings_to_apply.end()) \ - settings.set(#NAME, settings_to_apply[#NAME]); - - APPLY_FOR_SETTINGS(EXTRACT_SETTING) - -#undef EXTRACT_SETTING - - if (std::find(config_settings.begin(), config_settings.end(), "average_rows_speed_precision") != config_settings.end()) - { - Stats::avg_rows_speed_precision = test_config->getDouble("settings.average_rows_speed_precision"); - } - - if (std::find(config_settings.begin(), config_settings.end(), "average_bytes_speed_precision") != config_settings.end()) - { - Stats::avg_bytes_speed_precision = test_config->getDouble("settings.average_bytes_speed_precision"); - } - } - - Query query; - - if (!test_config->has("query") && !test_config->has("query_file")) - { - throw DB::Exception("Missing query fields in test's config: " + test_name); - } - - if (test_config->has("query") && test_config->has("query_file")) - { - throw DB::Exception("Found both query and query_file fields. 
Choose only one"); - } - - if (test_config->has("query")) - { - queries = DB::getMultipleValuesFromConfig(*test_config, "", "query"); - } - - if (test_config->has("query_file")) - { - const String filename = test_config->getString("query_file"); - if (filename.empty()) - throw DB::Exception("Empty file name"); - - bool tsv = fs::path(filename).extension().string() == ".tsv"; - - ReadBufferFromFile query_file(filename); - - if (tsv) - { - while (!query_file.eof()) - { - readEscapedString(query, query_file); - assertChar('\n', query_file); - queries.push_back(query); - } - } - else - { - readStringUntilEOF(query, query_file); - queries.push_back(query); - } - } - - if (queries.empty()) - { - throw DB::Exception("Did not find any query to execute: " + test_name); - } - - if (test_config->has("substitutions")) - { - /// Make "subconfig" of inner xml block - ConfigurationPtr substitutions_view(test_config->createView("substitutions")); - constructSubstitutions(substitutions_view, substitutions); - - auto queries_pre_format = queries; - queries.clear(); - for (const auto & query : queries_pre_format) - { - auto formatted = formatQueries(query, substitutions); - queries.insert(queries.end(), formatted.begin(), formatted.end()); - } - } - - if (!test_config->has("type")) - { - throw DB::Exception("Missing type property in config: " + test_name); - } - - String config_exec_type = test_config->getString("type"); - if (config_exec_type == "loop") - exec_type = ExecutionType::Loop; - else if (config_exec_type == "once") - exec_type = ExecutionType::Once; - else - throw DB::Exception("Unknown type " + config_exec_type + " in :" + test_name); - - times_to_run = test_config->getUInt("times_to_run", 1); - - stop_conditions_by_run.clear(); - TestStopConditions stop_conditions_template; - if (test_config->has("stop_conditions")) - { - ConfigurationPtr stop_conditions_config(test_config->createView("stop_conditions")); - stop_conditions_template.loadFromConfig(stop_conditions_config); - } - - if (stop_conditions_template.empty()) - throw DB::Exception("No termination conditions were found in config"); - - for (size_t i = 0; i < times_to_run * queries.size(); ++i) - stop_conditions_by_run.push_back(stop_conditions_template); - - - ConfigurationPtr metrics_view(test_config->createView("metrics")); - Keys metrics; - metrics_view->keys(metrics); - - main_metric.clear(); - if (test_config->has("main_metric")) - { - Keys main_metrics; - test_config->keys("main_metric", main_metrics); - if (main_metrics.size()) - main_metric = main_metrics[0]; - } - - if (!main_metric.empty()) - { - if (std::find(metrics.begin(), metrics.end(), main_metric) == metrics.end()) - metrics.push_back(main_metric); - } - else - { - if (lite_output) - throw DB::Exception("Specify main_metric for lite output"); - } - - if (metrics.size() > 0) - checkMetricsInput(metrics); - - statistics_by_run.resize(times_to_run * queries.size()); - for (size_t number_of_launch = 0; number_of_launch < times_to_run; ++number_of_launch) - { - QueriesWithIndexes queries_with_indexes; - - for (size_t query_index = 0; query_index < queries.size(); ++query_index) - { - size_t statistic_index = number_of_launch * queries.size() + query_index; - stop_conditions_by_run[statistic_index].reset(); - - queries_with_indexes.push_back({queries[query_index], statistic_index}); - } - - if (interrupt_listener.check()) - gotSIGINT = true; - - if (gotSIGINT) - break; - - runQueries(queries_with_indexes); - } - - if (lite_output) - return minOutput(main_metric); - else - 
return constructTotalInfo(metrics); - } - - void checkMetricsInput(const Strings & metrics) const - { - std::vector loop_metrics - = {"min_time", "quantiles", "total_time", "queries_per_second", "rows_per_second", "bytes_per_second"}; - - std::vector non_loop_metrics - = {"max_rows_per_second", "max_bytes_per_second", "avg_rows_per_second", "avg_bytes_per_second"}; - - if (exec_type == ExecutionType::Loop) - { - for (const String & metric : metrics) - { - if (std::find(non_loop_metrics.begin(), non_loop_metrics.end(), metric) != non_loop_metrics.end()) - { - throw DB::Exception("Wrong type of metric for loop execution type (" + metric + ")"); - } - } - } - else - { - for (const String & metric : metrics) - { - if (std::find(loop_metrics.begin(), loop_metrics.end(), metric) != loop_metrics.end()) - { - throw DB::Exception("Wrong type of metric for non-loop execution type (" + metric + ")"); - } - } - } - } - - void runQueries(const QueriesWithIndexes & queries_with_indexes) - { - for (const std::pair & query_and_index : queries_with_indexes) - { - Query query = query_and_index.first; - const size_t run_index = query_and_index.second; - - TestStopConditions & stop_conditions = stop_conditions_by_run[run_index]; - Stats & statistics = statistics_by_run[run_index]; - - statistics.clear(); - try - { - execute(query, statistics, stop_conditions); - - if (exec_type == ExecutionType::Loop) - { - for (size_t iteration = 1; !gotSIGINT; ++iteration) - { - stop_conditions.reportIterations(iteration); - if (stop_conditions.areFulfilled()) - break; - - execute(query, statistics, stop_conditions); - } - } - } - catch (const DB::Exception & e) - { - statistics.exception = e.what() + String(", ") + e.displayText(); - } - - if (!gotSIGINT) - { - statistics.ready = true; - } - } - } - - void execute(const Query & query, Stats & statistics, TestStopConditions & stop_conditions) - { - statistics.watch_per_query.restart(); - statistics.last_query_was_cancelled = false; - statistics.last_query_rows_read = 0; - statistics.last_query_bytes_read = 0; - - RemoteBlockInputStream stream(connection, query, {}, global_context, &settings); - - stream.setProgressCallback( - [&](const Progress & value) { this->checkFulfilledConditionsAndUpdate(value, stream, statistics, stop_conditions); }); - - stream.readPrefix(); - while (Block block = stream.read()) - ; - stream.readSuffix(); - - if (!statistics.last_query_was_cancelled) - statistics.updateQueryInfo(); - - statistics.setTotalTime(); - } - - void checkFulfilledConditionsAndUpdate( - const Progress & progress, RemoteBlockInputStream & stream, Stats & statistics, TestStopConditions & stop_conditions) - { - statistics.add(progress.rows, progress.bytes); - - stop_conditions.reportRowsRead(statistics.total_rows_read); - stop_conditions.reportBytesReadUncompressed(statistics.total_bytes_read); - stop_conditions.reportTotalTime(statistics.watch.elapsed() / (1000 * 1000)); - stop_conditions.reportMinTimeNotChangingFor(statistics.min_time_watch.elapsed() / (1000 * 1000)); - stop_conditions.reportMaxSpeedNotChangingFor(statistics.max_rows_speed_watch.elapsed() / (1000 * 1000)); - stop_conditions.reportAverageSpeedNotChangingFor(statistics.avg_rows_speed_watch.elapsed() / (1000 * 1000)); - - if (stop_conditions.areFulfilled()) - { - statistics.last_query_was_cancelled = true; - stream.cancel(false); - } - - if (interrupt_listener.check()) - { - gotSIGINT = true; - statistics.last_query_was_cancelled = true; - stream.cancel(false); - } - } - - void 
constructSubstitutions(ConfigurationPtr & substitutions_view, StringToVector & substitutions) - { - Keys xml_substitutions; - substitutions_view->keys(xml_substitutions); - - for (size_t i = 0; i != xml_substitutions.size(); ++i) - { - const ConfigurationPtr xml_substitution(substitutions_view->createView("substitution[" + std::to_string(i) + "]")); - - /// Property values for substitution will be stored in a vector - /// accessible by property name - std::vector xml_values; - xml_substitution->keys("values", xml_values); - - String name = xml_substitution->getString("name"); - - for (size_t j = 0; j != xml_values.size(); ++j) - { - substitutions[name].push_back(xml_substitution->getString("values.value[" + std::to_string(j) + "]")); - } - } - } - - std::vector formatQueries(const String & query, StringToVector substitutions) - { - std::vector queries; - - StringToVector::iterator substitutions_first = substitutions.begin(); - StringToVector::iterator substitutions_last = substitutions.end(); - --substitutions_last; - - std::map substitutions_map; - - runThroughAllOptionsAndPush(substitutions_first, substitutions_last, query, queries, substitutions_map); - - return queries; - } - - /// Recursive method which goes through all substitution blocks in xml - /// and replaces property {names} by their values - void runThroughAllOptionsAndPush(StringToVector::iterator substitutions_left, - StringToVector::iterator substitutions_right, - const String & template_query, - std::vector & queries, - const StringKeyValue & template_substitutions_map = StringKeyValue()) - { - String name = substitutions_left->first; - std::vector values = substitutions_left->second; - - for (const String & value : values) - { - /// Copy query string for each unique permutation - Query query = template_query; - StringKeyValue substitutions_map = template_substitutions_map; - size_t substr_pos = 0; - - while (substr_pos != String::npos) - { - substr_pos = query.find("{" + name + "}"); - - if (substr_pos != String::npos) - { - query.replace(substr_pos, 1 + name.length() + 1, value); - } - } - - substitutions_map[name] = value; - - /// If we've reached the end of substitution chain - if (substitutions_left == substitutions_right) - { - queries.push_back(query); - substitutions_maps.push_back(substitutions_map); - } - else - { - StringToVector::iterator next_it = substitutions_left; - ++next_it; - - runThroughAllOptionsAndPush(next_it, substitutions_right, query, queries, substitutions_map); - } - } - } - -public: - String constructTotalInfo(Strings metrics) - { - JSONString json_output; - - json_output.set("hostname", getFQDNOrHostName()); - json_output.set("num_cores", getNumberOfPhysicalCPUCores()); - json_output.set("num_threads", std::thread::hardware_concurrency()); - json_output.set("ram", getMemoryAmount()); - json_output.set("server_version", server_version); - json_output.set("time", DateLUT::instance().timeToString(time(nullptr))); - json_output.set("test_name", test_name); - json_output.set("main_metric", main_metric); - - if (substitutions.size()) - { - JSONString json_parameters(2); /// here, 2 is the size of \t padding - - for (auto it = substitutions.begin(); it != substitutions.end(); ++it) - { - String parameter = it->first; - std::vector values = it->second; - - String array_string = "["; - for (size_t i = 0; i != values.size(); ++i) - { - array_string += '"' + values[i] + '"'; - if (i != values.size() - 1) - { - array_string += ", "; - } - } - array_string += ']'; - - json_parameters.set(parameter, 
array_string); - } - - json_output.set("parameters", json_parameters.asString()); - } - - std::vector run_infos; - for (size_t query_index = 0; query_index < queries.size(); ++query_index) - { - for (size_t number_of_launch = 0; number_of_launch < times_to_run; ++number_of_launch) - { - Stats & statistics = statistics_by_run[number_of_launch * queries.size() + query_index]; - - if (!statistics.ready) - continue; - - JSONString runJSON; - - runJSON.set("query", queries[query_index]); - if (!statistics.exception.empty()) - runJSON.set("exception", statistics.exception); - - if (substitutions_maps.size()) - { - JSONString parameters(4); - - for (auto it = substitutions_maps[query_index].begin(); it != substitutions_maps[query_index].end(); ++it) - { - parameters.set(it->first, it->second); - } - - runJSON.set("parameters", parameters.asString()); - } - - - if (exec_type == ExecutionType::Loop) - { - /// in seconds - if (std::find(metrics.begin(), metrics.end(), "min_time") != metrics.end()) - runJSON.set("min_time", statistics.min_time / double(1000)); - - if (std::find(metrics.begin(), metrics.end(), "quantiles") != metrics.end()) - { - JSONString quantiles(4); /// here, 4 is the size of \t padding - for (double percent = 10; percent <= 90; percent += 10) - { - String quantile_key = std::to_string(percent / 100.0); - while (quantile_key.back() == '0') - quantile_key.pop_back(); - - quantiles.set(quantile_key, statistics.sampler.quantileInterpolated(percent / 100.0)); - } - quantiles.set("0.95", statistics.sampler.quantileInterpolated(95 / 100.0)); - quantiles.set("0.99", statistics.sampler.quantileInterpolated(99 / 100.0)); - quantiles.set("0.999", statistics.sampler.quantileInterpolated(99.9 / 100.0)); - quantiles.set("0.9999", statistics.sampler.quantileInterpolated(99.99 / 100.0)); - - runJSON.set("quantiles", quantiles.asString()); - } - - if (std::find(metrics.begin(), metrics.end(), "total_time") != metrics.end()) - runJSON.set("total_time", statistics.total_time); - - if (std::find(metrics.begin(), metrics.end(), "queries_per_second") != metrics.end()) - runJSON.set("queries_per_second", double(statistics.queries) / statistics.total_time); - - if (std::find(metrics.begin(), metrics.end(), "rows_per_second") != metrics.end()) - runJSON.set("rows_per_second", double(statistics.total_rows_read) / statistics.total_time); - - if (std::find(metrics.begin(), metrics.end(), "bytes_per_second") != metrics.end()) - runJSON.set("bytes_per_second", double(statistics.total_bytes_read) / statistics.total_time); - } - else - { - if (std::find(metrics.begin(), metrics.end(), "max_rows_per_second") != metrics.end()) - runJSON.set("max_rows_per_second", statistics.max_rows_speed); - - if (std::find(metrics.begin(), metrics.end(), "max_bytes_per_second") != metrics.end()) - runJSON.set("max_bytes_per_second", statistics.max_bytes_speed); - - if (std::find(metrics.begin(), metrics.end(), "avg_rows_per_second") != metrics.end()) - runJSON.set("avg_rows_per_second", statistics.avg_rows_speed_value); - - if (std::find(metrics.begin(), metrics.end(), "avg_bytes_per_second") != metrics.end()) - runJSON.set("avg_bytes_per_second", statistics.avg_bytes_speed_value); - } - - run_infos.push_back(runJSON); - } - } - - json_output.set("runs", run_infos); - - return json_output.asString(); - } - - String minOutput(const String & main_metric) - { - String output; - - for (size_t query_index = 0; query_index < queries.size(); ++query_index) - { - for (size_t number_of_launch = 0; number_of_launch < times_to_run; 
++number_of_launch) - { - if (queries.size() > 1) - { - output += "query \"" + queries[query_index] + "\", "; - } - - if (substitutions_maps.size()) - { - for (auto it = substitutions_maps[query_index].begin(); it != substitutions_maps[query_index].end(); ++it) - { - output += it->first + " = " + it->second + ", "; - } - } - - output += "run " + std::to_string(number_of_launch + 1) + ": "; - output += main_metric + " = "; - output += statistics_by_run[number_of_launch * queries.size() + query_index].getStatisticByName(main_metric); - output += "\n"; - } - } - - return output; - } -}; -} - -static void getFilesFromDir(const fs::path & dir, std::vector & input_files, const bool recursive = false) -{ - if (dir.extension().string() == ".xml") - std::cerr << "Warning: '" + dir.string() + "' is a directory, but has .xml extension" << std::endl; - - fs::directory_iterator end; - for (fs::directory_iterator it(dir); it != end; ++it) - { - const fs::path file = (*it); - if (recursive && fs::is_directory(file)) - getFilesFromDir(file, input_files, recursive); - else if (!fs::is_directory(file) && file.extension().string() == ".xml") - input_files.push_back(file.string()); - } -} - -int mainEntryClickHousePerformanceTest(int argc, char ** argv) -try -{ - using boost::program_options::value; - using Strings = std::vector; - - boost::program_options::options_description desc("Allowed options"); - desc.add_options()("help", "produce help message")("lite", "use lite version of output")( - "profiles-file", value()->default_value(""), "Specify a file with global profiles")( - "host,h", value()->default_value("localhost"), "")("port", value()->default_value(9000), "")( - "database", value()->default_value("default"), "")("user", value()->default_value("default"), "")( - "password", value()->default_value(""), "")("tags", value()->multitoken(), "Run only tests with tag")( - "skip-tags", value()->multitoken(), "Do not run tests with tag")("names", - value()->multitoken(), - "Run tests with specific name")("skip-names", value()->multitoken(), "Do not run tests with name")( - "names-regexp", value()->multitoken(), "Run tests with names matching regexp")("skip-names-regexp", - value()->multitoken(), - "Do not run tests with names matching regexp")("recursive,r", "Recurse in directories to find all xml's"); - - /// These options will not be displayed in --help - boost::program_options::options_description hidden("Hidden options"); - hidden.add_options()("input-files", value>(), ""); - - /// But they will be legit, though. And they must be given without name - boost::program_options::positional_options_description positional; - positional.add("input-files", -1); - - boost::program_options::options_description cmdline_options; - cmdline_options.add(desc).add(hidden); - - boost::program_options::variables_map options; - boost::program_options::store( - boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(positional).run(), options); - boost::program_options::notify(options); - - if (options.count("help")) - { - std::cout << "Usage: " << argv[0] << " [options] [test_file ...] 
[tests_folder]\n"; - std::cout << desc << "\n"; - return 0; - } - - Strings input_files; - bool recursive = options.count("recursive"); - - if (!options.count("input-files")) - { - std::cerr << "Trying to find test scenario files in the current folder..."; - fs::path curr_dir("."); - - getFilesFromDir(curr_dir, input_files, recursive); - - if (input_files.empty()) - { - std::cerr << std::endl; - throw DB::Exception("Did not find any xml files"); - } - else - std::cerr << " found " << input_files.size() << " files." << std::endl; - } - else - { - input_files = options["input-files"].as(); - Strings collected_files; - - for (const String & filename : input_files) - { - fs::path file(filename); - - if (!fs::exists(file)) - throw DB::Exception("File '" + filename + "' does not exist"); - - if (fs::is_directory(file)) - { - getFilesFromDir(file, collected_files, recursive); - } - else - { - if (file.extension().string() != ".xml") - throw DB::Exception("File '" + filename + "' does not have .xml extension"); - collected_files.push_back(filename); - } - } - - input_files = std::move(collected_files); - } - - Strings tests_tags = options.count("tags") ? options["tags"].as() : Strings({}); - Strings skip_tags = options.count("skip-tags") ? options["skip-tags"].as() : Strings({}); - Strings tests_names = options.count("names") ? options["names"].as() : Strings({}); - Strings skip_names = options.count("skip-names") ? options["skip-names"].as() : Strings({}); - Strings tests_names_regexp = options.count("names-regexp") ? options["names-regexp"].as() : Strings({}); - Strings skip_names_regexp = options.count("skip-names-regexp") ? options["skip-names-regexp"].as() : Strings({}); - - auto timeouts = DB::ConnectionTimeouts::getTCPTimeoutsWithoutFailover(DB::Settings()); - - DB::PerformanceTest performanceTest(options["host"].as(), - options["port"].as(), - options["database"].as(), - options["user"].as(), - options["password"].as(), - options.count("lite") > 0, - options["profiles-file"].as(), - std::move(input_files), - std::move(tests_tags), - std::move(skip_tags), - std::move(tests_names), - std::move(skip_names), - std::move(tests_names_regexp), - std::move(skip_names_regexp), - timeouts); - - return 0; -} -catch (...) -{ - std::cout << DB::getCurrentExceptionMessage(/*with stacktrace = */ true) << std::endl; - int code = DB::getCurrentExceptionCode(); - return code ? code : 1; -} diff --git a/dbms/src/Server/clickhouse-benchmark.cpp b/dbms/src/Server/clickhouse-benchmark.cpp deleted file mode 100644 index dfe4798634a..00000000000 --- a/dbms/src/Server/clickhouse-benchmark.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -int mainEntryClickHouseBenchmark(int argc, char ** argv); -int main(int argc_, char ** argv_) { return mainEntryClickHouseBenchmark(argc_, argv_); } diff --git a/dbms/src/Server/clickhouse-compressor.cpp b/dbms/src/Server/clickhouse-compressor.cpp deleted file mode 100644 index 403134976a6..00000000000 --- a/dbms/src/Server/clickhouse-compressor.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -int mainEntryClickHouseCompressor(int argc, char ** argv); -int main(int argc_, char ** argv_) { return mainEntryClickHouseCompressor(argc_, argv_); } diff --git a/dbms/src/Server/clickhouse-extract-from-config.cpp b/dbms/src/Server/clickhouse-extract-from-config.cpp deleted file mode 100644 index 32aa87651c3..00000000000 --- a/dbms/src/Server/clickhouse-extract-from-config.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -int mainEntryClickHouseExtractFromConfig(int argc, char ** argv); -int main(int argc_, char ** argv_) { return mainEntryClickHouseExtractFromConfig(argc_, argv_); } diff --git a/dbms/src/Server/clickhouse-format.cpp b/dbms/src/Server/clickhouse-format.cpp deleted file mode 100644 index ba4c8863e27..00000000000 --- a/dbms/src/Server/clickhouse-format.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -int mainEntryClickHouseFormat(int argc, char ** argv); -int main(int argc_, char ** argv_) { return mainEntryClickHouseFormat(argc_, argv_); } diff --git a/dbms/src/Server/clickhouse-local.cpp b/dbms/src/Server/clickhouse-local.cpp deleted file mode 100644 index e3ca32cc108..00000000000 --- a/dbms/src/Server/clickhouse-local.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022 PingCAP, Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-int mainEntryClickHouseLocal(int argc, char ** argv);
-int main(int argc_, char ** argv_) { return mainEntryClickHouseLocal(argc_, argv_); }
diff --git a/dbms/src/Server/clickhouse-performance-test.cpp b/dbms/src/Server/clickhouse-performance-test.cpp
deleted file mode 100644
index 9e3329469f9..00000000000
--- a/dbms/src/Server/clickhouse-performance-test.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2022 PingCAP, Ltd.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-int mainEntryClickHousePerformanceTest(int argc, char ** argv);
-int main(int argc_, char ** argv_) { return mainEntryClickHousePerformanceTest(argc_, argv_); }
diff --git a/dbms/src/Server/config_tools.h.in b/dbms/src/Server/config_tools.h.in
index 1e54149dab6..03df94cc8e1 100644
--- a/dbms/src/Server/config_tools.h.in
+++ b/dbms/src/Server/config_tools.h.in
@@ -4,10 +4,5 @@
 #cmakedefine01 ENABLE_CLICKHOUSE_SERVER
 #cmakedefine01 ENABLE_CLICKHOUSE_CLIENT
-#cmakedefine01 ENABLE_CLICKHOUSE_LOCAL
-#cmakedefine01 ENABLE_CLICKHOUSE_BENCHMARK
-#cmakedefine01 ENABLE_CLICKHOUSE_PERFORMANCE
-#cmakedefine01 ENABLE_CLICKHOUSE_TOOLS
-#cmakedefine01 ENABLE_CLICKHOUSE_COPIER
 #cmakedefine01 ENABLE_TIFLASH_DTTOOL
 #cmakedefine01 ENABLE_TIFLASH_DTWORKLOAD
diff --git a/dbms/src/Server/main.cpp b/dbms/src/Server/main.cpp
index e6bd6d3a180..a5773d6ba4d 100644
--- a/dbms/src/Server/main.cpp
+++ b/dbms/src/Server/main.cpp
@@ -32,9 +32,6 @@
 #if ENABLE_CLICKHOUSE_SERVER
 #include "Server.h"
 #endif
-#if ENABLE_CLICKHOUSE_LOCAL
-#include "LocalServer.h"
-#endif
 #if ENABLE_TIFLASH_DTTOOL
 #include
 #endif
@@ -51,24 +48,6 @@ int mainEntryClickHouseServer(int argc, char ** argv);
 #if ENABLE_CLICKHOUSE_CLIENT
 int mainEntryClickHouseClient(int argc, char ** argv);
 #endif
-#if ENABLE_CLICKHOUSE_LOCAL
-int mainEntryClickHouseLocal(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_BENCHMARK
-int mainEntryClickHouseBenchmark(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_PERFORMANCE
-int mainEntryClickHousePerformanceTest(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_TOOLS
-int mainEntryClickHouseExtractFromConfig(int argc, char ** argv);
-int mainEntryClickHouseCompressor(int argc, char ** argv);
-int mainEntryClickHouseFormat(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_COPIER
-int mainEntryClickHouseClusterCopier(int argc, char ** argv);
-#endif
-
 #if USE_EMBEDDED_COMPILER
 int mainEntryClickHouseClang(int argc, char ** argv);
 int mainEntryClickHouseLLD(int argc, char ** argv);
@@ -117,29 +96,12 @@ using MainFunc = int (*)(int, char **);
 /// Add an item here to register new application
 std::pair<const char *, MainFunc> clickhouse_applications[] = {
-#if ENABLE_CLICKHOUSE_LOCAL
-    {"local", mainEntryClickHouseLocal},
-#endif
 #if ENABLE_CLICKHOUSE_CLIENT
     {"client", mainEntryClickHouseClient},
 #endif
-#if ENABLE_CLICKHOUSE_BENCHMARK
-    {"benchmark", mainEntryClickHouseBenchmark},
-#endif
 #if ENABLE_CLICKHOUSE_SERVER
     {"server", mainEntryClickHouseServer},
 #endif
-#if ENABLE_CLICKHOUSE_PERFORMANCE
-    {"performance-test", mainEntryClickHousePerformanceTest},
-#endif
-#if ENABLE_CLICKHOUSE_TOOLS
-    {"extract-from-config", mainEntryClickHouseExtractFromConfig},
-    {"compressor", mainEntryClickHouseCompressor},
-    {"format", mainEntryClickHouseFormat},
-#endif
-#if ENABLE_CLICKHOUSE_COPIER
-    {"copier", mainEntryClickHouseClusterCopier},
-#endif
 #if USE_EMBEDDED_COMPILER
     {"clang", mainEntryClickHouseClang},
     {"clang++", mainEntryClickHouseClang},