diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index fcea488..5a575f7 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -17,9 +17,15 @@ on:
         description: "Docker images for smoke testing (comma-separated, e.g., ubuntu:20.04,ubuntu:22.04,ubuntu:24.04)"
         required: false
         default: "ubuntu:20.04,ubuntu:22.04,ubuntu:24.04"
+      build_runner:
+        description: "OS that the build steps run on"
+        required: false
+        default: "ubuntu-22.04"
+        type: string
 jobs:
   build-source-package:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
+    continue-on-error: true
     strategy:
       matrix:
         dist: ${{ fromJSON(vars.BUILD_DISTS) }}
@@ -27,6 +33,24 @@ jobs:
       - uses: actions/checkout@v4
         with:
           path: sources
+      - name: Validate configure.ac version matches GitHub Release (only on release)
+        if: github.event.release.tag_name != ''
+        env:
+          VERSION: ${{ github.event.release.tag_name }}
+        run: |
+          # Extract the current version from configure.ac
+          CURRENT_VERSION=$(awk -F'[(),]' '/AC_INIT/ {print $3}' sources/configure.ac | tr -d ' ')
+
+          echo "Current configure.ac version: $CURRENT_VERSION"
+          echo "GitHub Release version: $VERSION"
+
+          # Check if versions match
+          if [ "$CURRENT_VERSION" != "$VERSION" ]; then
+            echo "❌ Version mismatch! configure.ac: $CURRENT_VERSION, GitHub Release: $VERSION"
+            exit 1 # Fail the build
+          else
+            echo "Version match. Proceeding with the build."
+          fi
       - name: Install dependencies
         run: |
           sudo apt-get update && \
@@ -63,7 +87,8 @@ jobs:
           memtier-benchmark_*.tar.*
 
   build-binary-package:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
+    continue-on-error: true
     environment: build
     strategy:
       matrix:
@@ -121,13 +146,10 @@ jobs:
           *.deb
 
   smoke-test-packages:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     needs: build-binary-package
     env:
       ARCH: amd64
-      # required by ubuntu:bionic
-      # https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/
-      ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
     strategy:
       matrix:
         image: ${{ fromJSON(vars.SMOKE_TEST_IMAGES) }}
@@ -141,15 +163,7 @@ jobs:
             exit 1
           fi
           echo "BUILD_ARCH=$BUILD_ARCH" >> $GITHUB_ENV
-      - name: Get binary packages for ubuntu:bionic
-        if: matrix.image == 'ubuntu:bionic'
-        uses: actions/download-artifact@v3
-        with:
-          name: binary-${{ env.BUILD_ARCH }}-${{ env.ARCH }}
-          path: binary-${{ env.BUILD_ARCH }}-${{ env.ARCH }}
-
-      - name: Get binary packages for other versions
-        if: matrix.image != 'ubuntu:bionic'
+      - name: Get binary packages
         uses: actions/download-artifact@v4
         with:
           name: binary-${{ env.BUILD_ARCH }}-${{ env.ARCH }}
@@ -162,7 +176,7 @@ jobs:
   publish-to-apt:
     env:
       DEB_S3_VERSION: "0.11.3"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     environment: build
     needs: smoke-test-packages
     steps:
diff --git a/memtier_benchmark.cpp b/memtier_benchmark.cpp
index 61be983..6037b18 100755
--- a/memtier_benchmark.cpp
+++ b/memtier_benchmark.cpp
@@ -159,7 +159,8 @@ static void config_print(FILE *file, struct benchmark_config *cfg)
         "wait-ratio = %u:%u\n"
         "num-slaves = %u-%u\n"
         "wait-timeout = %u-%u\n"
-        "json-out-file = %s\n",
+        "json-out-file = %s\n"
+        "print-all-runs = %s\n",
         cfg->server,
         cfg->port,
         cfg->unix_socket,
@@ -209,7 +210,8 @@ static void config_print(FILE *file, struct benchmark_config *cfg)
         cfg->wait_ratio.a, cfg->wait_ratio.b,
         cfg->num_slaves.min, cfg->num_slaves.max,
         cfg->wait_timeout.min, cfg->wait_timeout.max,
-        cfg->json_out_file);
+        cfg->json_out_file,
+        cfg->print_all_runs ? "yes" : "no");
"yes" : "no"); } static void config_print_to_json(json_handler * jsonhandler, struct benchmark_config *cfg) @@ -267,6 +269,7 @@ static void config_print_to_json(json_handler * jsonhandler, struct benchmark_co jsonhandler->write_obj("wait-ratio" ,"\"%u:%u\"", cfg->wait_ratio.a, cfg->wait_ratio.b); jsonhandler->write_obj("num-slaves" ,"\"%u:%u\"", cfg->num_slaves.min, cfg->num_slaves.max); jsonhandler->write_obj("wait-timeout" ,"\"%u-%u\"", cfg->wait_timeout.min, cfg->wait_timeout.max); + jsonhandler->write_obj("print-all-runs" ,"\"%s\"", cfg->print_all_runs ? "true" : "false"); jsonhandler->close_nesting(); } @@ -403,6 +406,7 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cf o_show_config, o_hide_histogram, o_print_percentiles, + o_print_all_runs, o_distinct_client_seed, o_randomize, o_client_stats, @@ -456,6 +460,7 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cf { "show-config", 0, 0, o_show_config }, { "hide-histogram", 0, 0, o_hide_histogram }, { "print-percentiles", 1, 0, o_print_percentiles }, + { "print-all-runs", 0, 0, o_print_all_runs }, { "distinct-client-seed", 0, 0, o_distinct_client_seed }, { "randomize", 0, 0, o_randomize }, { "requests", 1, 0, 'n' }, @@ -587,6 +592,9 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cf return -1; } break; + case o_print_all_runs: + cfg->print_all_runs = true; + break; case o_distinct_client_seed: cfg->distinct_client_seed++; break; @@ -977,6 +985,7 @@ void usage() { " --show-config Print detailed configuration before running\n" " --hide-histogram Don't print detailed latency histogram\n" " --print-percentiles Specify which percentiles info to print on the results table (by default prints percentiles: 50,99,99.9)\n" + " --print-all-runs When performing multiple test iterations, print and save results for all iterations\n" " --cluster-mode Run client in cluster mode\n" " -h, --help Display this help\n" " -v, --version Display version information\n" @@ -1652,7 +1661,16 @@ int main(int argc, char *argv[]) } // If more than 1 run was used, compute best, worst and average + // Furthermore, if print_all_runs is enabled we save separate histograms per run if (cfg.run_count > 1) { + // User wants to see a separate histogram per run + if (cfg.print_all_runs) { + for (auto i = 0U; i < all_stats.size(); i++) { + auto run_title = std::string("RUN #") + std::to_string(i + 1) + " RESULTS"; + all_stats[i].print(outfile, &cfg, run_title.c_str(), jsonhandler); + } + } + // User wants the best and worst unsigned int min_ops_sec = (unsigned int) -1; unsigned int max_ops_sec = 0; run_stats* worst = NULL; @@ -1669,7 +1687,6 @@ int main(int argc, char *argv[]) best = &(*i); } } - // Best results: best->print(outfile, &cfg, "BEST RUN RESULTS", jsonhandler); // worst results: diff --git a/memtier_benchmark.h b/memtier_benchmark.h index b7a7190..3192cf7 100644 --- a/memtier_benchmark.h +++ b/memtier_benchmark.h @@ -63,6 +63,7 @@ struct benchmark_config { int show_config; int hide_histogram; config_quantiles print_percentiles; + bool print_all_runs; int distinct_client_seed; int randomize; int next_client_idx; diff --git a/tests/tests_oss_simple_flow.py b/tests/tests_oss_simple_flow.py index 2c276bc..5205960 100644 --- a/tests/tests_oss_simple_flow.py +++ b/tests/tests_oss_simple_flow.py @@ -407,6 +407,51 @@ def test_default_set_get_3_runs(env): assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count) + +# run each 
+def test_print_all_runs(env):
+    run_count = 5
+    benchmark_specs = {"name": env.testName, "args": ['--print-all-runs','--run-count={}'.format(run_count)]}
+    addTLSArgs(benchmark_specs, env)
+    config = get_default_memtier_config()
+    master_nodes_list = env.getMasterNodesList()
+    overall_expected_request_count = get_expected_request_count(config) * run_count
+
+    add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)
+
+    # Create a temporary directory
+    test_dir = tempfile.mkdtemp()
+
+    config = RunConfig(test_dir, env.testName, config, {})
+    ensure_clean_benchmark_folder(config.results_dir)
+
+    benchmark = Benchmark.from_json(config, benchmark_specs)
+
+    # benchmark.run() returns True if the return code of memtier_benchmark was 0
+    memtier_ok = benchmark.run()
+
+    master_nodes_connections = env.getOSSMasterNodesConnectionList()
+    merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
+    overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
+    assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
+
+    json_filename = '{0}/mb.json'.format(config.results_dir)
+    ## Assert that per-run, best, worst, and aggregated results are properly stored in the JSON output
+    with open(json_filename) as results_json:
+        results_dict = json.load(results_json)
+        print_all_runs = results_dict["configuration"]["print-all-runs"]
+        env.assertTrue(print_all_runs)
+        for run in range(1, run_count+1):
+            # assert the run information exists
+            env.assertTrue(f"RUN #{run} RESULTS" in results_dict)
+
+        # ensure best, worst, and aggregate results are present
+        env.assertTrue("BEST RUN RESULTS" in results_dict)
+        env.assertTrue("WORST RUN RESULTS" in results_dict)
+        env.assertTrue(f"AGGREGATED AVERAGE RESULTS ({run_count} runs)" in results_dict)
+        # ALL STATS should only exist in a single-run json
+        env.assertTrue("ALL STATS" not in results_dict)
+
 def test_default_arbitrary_command_pubsub(env):
     benchmark_specs = {"name": env.testName, "args": []}
     addTLSArgs(benchmark_specs, env)
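
Usage sketch (not part of the patch; assumes the existing --run-count and --json-out-file options shown elsewhere in this diff):

    # run 5 iterations, print per-run results in addition to best/worst/average,
    # and store everything in mb.json
    memtier_benchmark --run-count=5 --print-all-runs --json-out-file=mb.json

With this change, such an invocation is expected to emit "RUN #1 RESULTS" through "RUN #5 RESULTS" sections alongside the existing "BEST RUN RESULTS", "WORST RUN RESULTS" and "AGGREGATED AVERAGE RESULTS (5 runs)" sections, both on stdout and in the JSON output.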