diff --git a/.github/workflows/test_on_push.yaml b/.github/workflows/test_on_push.yaml index a8889a16..ade54e06 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -13,6 +13,10 @@ jobs: matrix: # We need 1.10.6 here to check that module works with # old Tarantool versions that don't have "tuple-keydef"/"tuple-merger" support. + # We test old metrics with Tarantool 2.10 because since Tarantool 2.11.1 + # it uses its own metrics package. + # We test old metrics with Cartridge 2.7.9 because since 2.8.0 it + # requires metrics 1.0.0. tarantool-version: ["1.10.6", "1.10", "2.2", "2.3", "2.4", "2.5", "2.6", "2.7", "2.8", "2.10", "2.11"] metrics-version: [""] cartridge-version: ["2.8.0"] @@ -24,16 +28,19 @@ jobs: - tarantool-version: "2.7" remove-merger: true cartridge-version: "2.8.0" - - tarantool-version: "2.11" - metrics-version: "0.1.8" - cartridge-version: "2.8.0" - - tarantool-version: "2.11" + - tarantool-version: "2.10" metrics-version: "0.10.0" - cartridge-version: "2.8.0" + cartridge-version: "2.7.9" - tarantool-version: "2.11" coveralls: true metrics-version: "1.0.0" cartridge-version: "2.8.0" + - tarantool-version: "2.11" + metrics-version: "1.0.0" + vshard-version: "0.1.24" + - tarantool-version: "master" + metrics-version: "1.0.0" + vshard-version: "0.1.24" fail-fast: false # Can't install older versions on 22.04, # see https://github.com/tarantool/setup-tarantool/issues/36 @@ -42,10 +49,42 @@ jobs: - uses: actions/checkout@master - name: Setup Tarantool CE + if: matrix.tarantool-version != 'master' uses: tarantool/setup-tarantool@v2 with: tarantool-version: ${{ matrix.tarantool-version }} + - name: Setup tt + run: | + curl -L https://tarantool.io/release/2/installer.sh | sudo bash + sudo apt install -y tt + tt version + + - name: Get Tarantool master latest commit + if: matrix.tarantool-version == 'master' + run: | + commit_hash=$(git ls-remote https://github.com/tarantool/tarantool.git --branch master | head -c 8) 
+ echo "LATEST_COMMIT=${commit_hash}" >> $GITHUB_ENV + shell: bash + + - name: Cache Tarantool master + if: matrix.tarantool-version == 'master' + id: cache-latest + uses: actions/cache@v3 + with: + path: "${GITHUB_WORKSPACE}/bin" + key: cache-latest-${{ env.LATEST_COMMIT }} + + - name: Setup Tarantool master + if: matrix.tarantool-version == 'master' && steps.cache-latest.outputs.cache-hit != 'true' + run: | + tt init + tt install tarantool master + + - name: Add Tarantool master to PATH + if: matrix.tarantool-version == 'master' + run: echo "${GITHUB_WORKSPACE}/bin" >> $GITHUB_PATH + - name: Fix luarocks in Tarantool CE 1.10.6 if: matrix.tarantool-version == '1.10.6' run: | @@ -57,10 +96,11 @@ jobs: ./deps.sh env: CARTRIDGE_VERSION: ${{ matrix.cartridge-version }} + VSHARD_VERSION: ${{ matrix.vshard-version }} - name: Install metrics if: matrix.metrics-version != '' - run: tarantoolctl rocks install metrics ${{ matrix.metrics-version }} + run: tt rocks install metrics ${{ matrix.metrics-version }} - name: Remove external merger if needed if: ${{ matrix.remove-merger }} @@ -90,23 +130,64 @@ jobs: matrix: tarantool-version: ["1.10", "2.11"] metrics-version: ["1.0.0"] + cartridge-version: ["2.8.0"] + include: + - tarantool-version: "master" + metrics-version: "1.0.0" + vshard-version: "0.1.24" fail-fast: false runs-on: ubuntu-20.04 steps: - uses: actions/checkout@master - name: Setup Tarantool CE + if: matrix.tarantool-version != 'master' uses: tarantool/setup-tarantool@v2 with: tarantool-version: ${{ matrix.tarantool-version }} + - name: Setup tt + run: | + curl -L https://tarantool.io/release/2/installer.sh | sudo bash + sudo apt install -y tt + tt version + + - name: Get Tarantool master latest commit + if: matrix.tarantool-version == 'master' + run: | + commit_hash=$(git ls-remote https://github.com/tarantool/tarantool.git --branch master | head -c 8) + echo "LATEST_COMMIT=${commit_hash}" >> $GITHUB_ENV + shell: bash + + - name: Cache Tarantool master + if: 
matrix.tarantool-version == 'master' + id: cache-latest + uses: actions/cache@v3 + with: + path: "${GITHUB_WORKSPACE}/bin" + key: cache-latest-${{ env.LATEST_COMMIT }} + + - name: Setup Tarantool master + if: matrix.tarantool-version == 'master' && steps.cache-latest.outputs.cache-hit != 'true' + run: | + tt init + tt install tarantool master + + - name: Add Tarantool master to PATH + if: matrix.tarantool-version == 'master' + run: echo "${GITHUB_WORKSPACE}/bin" >> $GITHUB_PATH + - name: Install requirements for community run: | tarantool --version ./deps.sh + env: + CARTRIDGE_VERSION: ${{ matrix.cartridge-version }} + VSHARD_VERSION: ${{ matrix.vshard-version }} - name: Install metrics - run: tarantoolctl rocks install metrics ${{ matrix.metrics-version }} + if: matrix.metrics-version != '' + run: tt rocks install metrics ${{ matrix.metrics-version }} # This server starts and listen on 8084 port that is used for tests - name: Stop Mono server @@ -127,6 +208,7 @@ jobs: - folder: "2.11" bundle: "tarantool-enterprise-sdk-nogc64-2.11.0-0-r563.linux.x86_64" metrics-version: ["", "1.0.0"] + cartridge-version: ["2.8.0"] fail-fast: false runs-on: ubuntu-20.04 steps: @@ -138,9 +220,13 @@ jobs: tar -xzf ${{ matrix.tarantool-version.bundle }}.tar.gz rm -f ${{ matrix.tarantool-version.bundle }}.tar.gz sudo cp tarantool-enterprise/tarantool /usr/bin/tarantool + sudo rm tarantool-enterprise/tt source tarantool-enterprise/env.sh tarantool --version ./deps.sh + env: + CARTRIDGE_VERSION: ${{ matrix.cartridge-version }} + VSHARD_VERSION: ${{ matrix.vshard-version }} - name: Install metrics if: matrix.metrics-version != '' diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f47c19b..8b1db30a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,15 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ## Unreleased +### Changed +* `deps.sh` installs the `vshard` instead of the `cartridge` by default (#364). 
+ You can specify an environment variable `CARTRIDGE_VERSION` to install + the `cartridge` and run test cases with it. + ### Fixed * `crud.readview` resource cleanup on garbage collect (#379). +* `doc/playground.lua` does not work with Tarantool 3 (#371). +* Tests with Tarantool 3 (#364). ## [1.3.0] - 27-09-23 diff --git a/deps.sh b/deps.sh index c1b76540..73af055e 100755 --- a/deps.sh +++ b/deps.sh @@ -1,18 +1,29 @@ -#!/bin/sh +#!/usr/bin/env bash # Call this script to install test dependencies set -e +TTCTL=tt +if ! [ -x "$(command -v tt)" ]; +then + if ! [ -x "$(command -v tarantoolctl)" ]; + then + echo "tt or tarantoolctl not found" + exit 1 + fi + TTCTL=tarantoolctl +fi + # Test dependencies: -tarantoolctl rocks install luatest -tarantoolctl rocks install luacheck 0.25.0 -tarantoolctl rocks install luacov 0.13.0 +$TTCTL rocks install luatest +$TTCTL rocks install luacheck 0.25.0 +$TTCTL rocks install luacov 0.13.0 # cluacov, luacov-coveralls and dependencies -tarantoolctl rocks install https://raw.githubusercontent.com/mpeterv/cluacov/master/cluacov-scm-1.rockspec -tarantoolctl rocks install https://raw.githubusercontent.com/LuaDist/dkjson/master/dkjson-2.5-2.rockspec -tarantoolctl rocks install https://raw.githubusercontent.com/keplerproject/luafilesystem/master/luafilesystem-scm-1.rockspec -tarantoolctl rocks install https://raw.githubusercontent.com/moteus/lua-path/master/rockspecs/lua-path-scm-0.rockspec +$TTCTL rocks install https://raw.githubusercontent.com/mpeterv/cluacov/master/cluacov-scm-1.rockspec +$TTCTL rocks install https://raw.githubusercontent.com/LuaDist/dkjson/master/dkjson-2.5-2.rockspec +$TTCTL rocks install https://raw.githubusercontent.com/keplerproject/luafilesystem/master/luafilesystem-scm-1.rockspec +$TTCTL rocks install https://raw.githubusercontent.com/moteus/lua-path/master/rockspecs/lua-path-scm-0.rockspec # Most of this code is the workaround for # https://github.com/moteus/luacov-coveralls/pull/30 @@ -22,14 +33,19 @@ 
LUACOV_COVERALLS_ROCKSPEC_URL="https://raw.githubusercontent.com/moteus/luacov-c LUACOV_COVERALLS_ROCKSPEC_FILE="${TMPDIR}/luacov-coveralls-scm-0.rockspec" curl -fsSL "${LUACOV_COVERALLS_ROCKSPEC_URL}" > "${LUACOV_COVERALLS_ROCKSPEC_FILE}" sed -i -e 's@git://@git+https://@' "${LUACOV_COVERALLS_ROCKSPEC_FILE}" -tarantoolctl rocks install "${LUACOV_COVERALLS_ROCKSPEC_FILE}" +$TTCTL rocks install "${LUACOV_COVERALLS_ROCKSPEC_FILE}" rm "${LUACOV_COVERALLS_ROCKSPEC_FILE}" rmdir "${TMPDIR}" -CARTRIDGE_VERSION="${CARTRIDGE_VERSION:-2.8.0}" +if [[ -n "$CARTRIDGE_VERSION" ]] +then + $TTCTL rocks install cartridge "$CARTRIDGE_VERSION" + $TTCTL rocks install migrations 0.4.2 +else + VSHARD_VERSION="${VSHARD_VERSION:-0.1.24}" + $TTCTL rocks install vshard "$VSHARD_VERSION" +fi -tarantoolctl rocks install cartridge "$CARTRIDGE_VERSION" -tarantoolctl rocks install ddl 1.6.2 -tarantoolctl rocks install migrations 0.4.2 +$TTCTL rocks install ddl 1.6.2 -tarantoolctl rocks make +$TTCTL rocks make diff --git a/doc/playground.lua b/doc/playground.lua index 3a5925a2..2453c063 100755 --- a/doc/playground.lua +++ b/doc/playground.lua @@ -34,6 +34,13 @@ else fio.rmtree(tempdir) end +local replicaset_uuid +if box.info().replicaset ~= nil then + replicaset_uuid = box.info().replicaset.uuid +else + replicaset_uuid = box.info().cluster.uuid +end + -- Setup vshard. 
_G.vshard = vshard box.once('guest', function() @@ -43,7 +50,7 @@ local uri = 'guest@localhost:3301' local cfg = { bucket_count = 3000, sharding = { - [box.info().cluster.uuid] = { + [replicaset_uuid] = { replicas = { [box.info().uuid] = { uri = uri, diff --git a/test/entrypoint/srv_batch_operations.lua b/test/entrypoint/srv_batch_operations.lua deleted file mode 100755 index c95ae749..00000000 --- a/test/entrypoint/srv_batch_operations.lua +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') - -package.preload['customers-storage'] = function() - return { - role_name = 'customers-storage', - init = function() - local engine = os.getenv('ENGINE') or 'memtx' - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'age', type = 'number'}, - }, - if_not_exists = true, - engine = engine, - }) - customers_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - customers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - - local developers_space = box.schema.space.create('developers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'login', type = 'string'}, - }, - if_not_exists = true, - engine = engine, - }) - developers_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - developers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - developers_space:create_index('login', { - parts = { {field = 'login'} }, - unique = true, - if_not_exists = true, - }) - 
end, - } -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'customers-storage', - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - }, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_batch_operations/cartridge_init.lua b/test/entrypoint/srv_batch_operations/cartridge_init.lua new file mode 100755 index 00000000..a5bb8d62 --- /dev/null +++ b/test/entrypoint/srv_batch_operations/cartridge_init.lua @@ -0,0 +1,39 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. "/?.lua;" +end + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = require('storage_init'), + } +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'customers-storage', + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + }, +}) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_batch_operations/storage_init.lua b/test/entrypoint/srv_batch_operations/storage_init.lua new file mode 100644 index 00000000..8aaa6ef9 --- /dev/null +++ b/test/entrypoint/srv_batch_operations/storage_init.lua @@ -0,0 +1,51 @@ +return function() + if box.info.ro == true then + return + end + + local engine = os.getenv('ENGINE') or 'memtx' + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 
'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'age', type = 'number'}, + }, + if_not_exists = true, + engine = engine, + }) + customers_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + customers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + + local developers_space = box.schema.space.create('developers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'login', type = 'string'}, + }, + if_not_exists = true, + engine = engine, + }) + developers_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + developers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + developers_space:create_index('login', { + parts = { {field = 'login'} }, + unique = true, + if_not_exists = true, + }) +end diff --git a/test/entrypoint/srv_ddl.lua b/test/entrypoint/srv_ddl.lua deleted file mode 100755 index 6e7d9170..00000000 --- a/test/entrypoint/srv_ddl.lua +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') -local ddl = require('ddl') - -package.preload['customers-storage'] = function() - -- set sharding func in dot.notation - -- in _G for sharding func tests - local some_module = { - sharding_func = - function(key) - if key ~= nil and key[1] ~= nil then - return key[1] % 10 - end - end - } - rawset(_G, 'some_module', some_module) - - return { - role_name = 'customers-storage', - init = function() - local engine = os.getenv('ENGINE') or 'memtx' - local customers_schema = { - engine = engine, - is_local = true, - temporary = false, - format = { - {name = 'id', is_nullable = 
false, type = 'unsigned'}, - {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, - {name = 'name', is_nullable = false, type = 'string'}, - {name = 'age', is_nullable = false, type = 'number'}, - }, - indexes = { - -- This table is intentionally blank. - }, - } - - local primary_index = { - name = 'id', - type = 'TREE', - unique = true, - parts = { - {path = 'id', is_nullable = false, type = 'unsigned'}, - {path = 'name', is_nullable = false, type = 'string'}, - }, - } - local primary_index_id = { - name = 'id', - type = 'TREE', - unique = true, - parts = { - {path = 'id', is_nullable = false, type = 'unsigned'}, - }, - } - local bucket_id_index = { - name = 'bucket_id', - type = 'TREE', - unique = false, - parts = { - {path = 'bucket_id', is_nullable = false, type = 'unsigned'}, - } - } - local name_index = { - name = 'name', - type = 'TREE', - unique = true, - parts = { - {path = 'name', is_nullable = false, type = 'string'}, - }, - } - local age_index = { - name = 'age', - type = 'TREE', - unique = false, - parts = { - {path = 'age', is_nullable = false, type = 'number'}, - }, - } - local secondary_index = { - name = 'secondary', - type = 'TREE', - unique = false, - parts = { - {path = 'id', is_nullable = false, type = 'unsigned'}, - {path = 'name', is_nullable = false, type = 'string'}, - }, - } - - local three_fields_index = { - name = 'three_fields', - type = 'TREE', - unique = false, - parts = { - {path = 'age', is_nullable = false, type = 'number'}, - {path = 'name', is_nullable = false, type = 'string'}, - {path = 'id', is_nullable = false, type = 'unsigned'}, - }, - } - - local customers_id_schema = table.deepcopy(customers_schema) - customers_id_schema.sharding_key = {'id'} - table.insert(customers_id_schema.indexes, primary_index_id) - table.insert(customers_id_schema.indexes, bucket_id_index) - table.insert(customers_id_schema.indexes, age_index) - - local customers_name_key_schema = table.deepcopy(customers_schema) - 
customers_name_key_schema.sharding_key = {'name'} - table.insert(customers_name_key_schema.indexes, primary_index) - table.insert(customers_name_key_schema.indexes, bucket_id_index) - - local customers_name_key_uniq_index_schema = table.deepcopy(customers_schema) - customers_name_key_uniq_index_schema.sharding_key = {'name'} - table.insert(customers_name_key_uniq_index_schema.indexes, primary_index) - table.insert(customers_name_key_uniq_index_schema.indexes, bucket_id_index) - table.insert(customers_name_key_uniq_index_schema.indexes, name_index) - - local customers_name_key_non_uniq_index_schema = table.deepcopy(customers_schema) - customers_name_key_non_uniq_index_schema.sharding_key = {'name'} - name_index.unique = false - table.insert(customers_name_key_non_uniq_index_schema.indexes, primary_index) - table.insert(customers_name_key_non_uniq_index_schema.indexes, bucket_id_index) - table.insert(customers_name_key_non_uniq_index_schema.indexes, name_index) - - local customers_secondary_idx_name_key_schema = table.deepcopy(customers_schema) - customers_secondary_idx_name_key_schema.sharding_key = {'name'} - table.insert(customers_secondary_idx_name_key_schema.indexes, primary_index_id) - table.insert(customers_secondary_idx_name_key_schema.indexes, secondary_index) - table.insert(customers_secondary_idx_name_key_schema.indexes, bucket_id_index) - - local customers_age_key_schema = table.deepcopy(customers_schema) - customers_age_key_schema.sharding_key = {'age'} - table.insert(customers_age_key_schema.indexes, primary_index) - table.insert(customers_age_key_schema.indexes, bucket_id_index) - - local customers_name_age_key_different_indexes_schema = table.deepcopy(customers_schema) - customers_name_age_key_different_indexes_schema.sharding_key = {'name', 'age'} - table.insert(customers_name_age_key_different_indexes_schema.indexes, primary_index) - table.insert(customers_name_age_key_different_indexes_schema.indexes, bucket_id_index) - 
table.insert(customers_name_age_key_different_indexes_schema.indexes, age_index) - - local customers_name_age_key_three_fields_index_schema = table.deepcopy(customers_schema) - customers_name_age_key_three_fields_index_schema.sharding_key = {'name', 'age'} - table.insert(customers_name_age_key_three_fields_index_schema.indexes, primary_index_id) - table.insert(customers_name_age_key_three_fields_index_schema.indexes, bucket_id_index) - table.insert(customers_name_age_key_three_fields_index_schema.indexes, three_fields_index) - - local customers_id_key_schema = table.deepcopy(customers_schema) - customers_id_key_schema.sharding_key = {'id'} - table.insert(customers_id_key_schema.indexes, primary_index) - table.insert(customers_id_key_schema.indexes, bucket_id_index) - table.insert(customers_id_key_schema.indexes, name_index) - - local customers_body_func_schema = table.deepcopy(customers_id_key_schema) - customers_body_func_schema.sharding_func = { body = 'function(key) return key[1] % 10 end' } - - local customers_G_func_schema = table.deepcopy(customers_id_key_schema) - customers_G_func_schema.sharding_func = 'some_module.sharding_func' - - local customers_empty_sharding_func_schema = table.deepcopy(customers_id_key_schema) - - local customers_vshard_mpcrc32_schema = table.deepcopy(customers_id_key_schema) - customers_vshard_mpcrc32_schema.sharding_func = 'vshard.router.bucket_id_mpcrc32' - - local customers_vshard_strcrc32_schema = table.deepcopy(customers_id_key_schema) - customers_vshard_strcrc32_schema.sharding_func = 'vshard.router.bucket_id_strcrc32' - - local schema = { - spaces = { - customers = customers_id_schema, - customers_name_key = customers_name_key_schema, - customers_name_key_uniq_index = customers_name_key_uniq_index_schema, - customers_name_key_non_uniq_index = customers_name_key_non_uniq_index_schema, - customers_secondary_idx_name_key = customers_secondary_idx_name_key_schema, - customers_age_key = customers_age_key_schema, - 
customers_name_age_key_different_indexes = customers_name_age_key_different_indexes_schema, - customers_name_age_key_three_fields_index = customers_name_age_key_three_fields_index_schema, - customers_G_func = customers_G_func_schema, - customers_body_func = customers_body_func_schema, - customers_empty_sharding_func = customers_empty_sharding_func_schema, - customers_vshard_mpcrc32 = customers_vshard_mpcrc32_schema, - customers_vshard_strcrc32 = customers_vshard_strcrc32_schema, - } - } - - if not box.info.ro then - local ok, err = ddl.set_schema(schema) - if not ok then - error(err) - end - end - - rawset(_G, 'set_sharding_key', function(space_name, sharding_key_def) - local fieldno_sharding_key = 2 - box.space['_ddl_sharding_key']:update(space_name, {{'=', fieldno_sharding_key, sharding_key_def}}) - end) - rawset(_G, 'set_sharding_func', function(space_name, fieldno_sharding_func, sharding_func_def) - local record = {space_name, box.NULL, box.NULL} - record[fieldno_sharding_func] = sharding_func_def - box.space['_ddl_sharding_func']:replace(record) - end) - end, - } -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'customers-storage', - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - }}, - -- Increase readahead for performance tests. - -- Performance tests on HP ProBook 440 G5 16 Gb - -- bump into default readahead limit and thus not - -- give a full picture. 
- { readahead = 20 * 1024 * 1024 } -) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_ddl/cartridge_init.lua b/test/entrypoint/srv_ddl/cartridge_init.lua new file mode 100755 index 00000000..df5d952c --- /dev/null +++ b/test/entrypoint/srv_ddl/cartridge_init.lua @@ -0,0 +1,46 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. "/?.lua;" +end + +package.preload['customers-storage'] = function() + -- set sharding func in dot.notation + -- in _G for sharding func tests + return { + role_name = 'customers-storage', + init = require('storage_init'), + } +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'customers-storage', + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + }}, + -- Increase readahead for performance tests. + -- Performance tests on HP ProBook 440 G5 16 Gb + -- bump into default readahead limit and thus not + -- give a full picture. 
+ { readahead = 20 * 1024 * 1024 } +) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_ddl/router_init.lua b/test/entrypoint/srv_ddl/router_init.lua new file mode 100644 index 00000000..bc30930e --- /dev/null +++ b/test/entrypoint/srv_ddl/router_init.lua @@ -0,0 +1,11 @@ +return function() + local some_module = { + sharding_func = + function(key) + if key ~= nil and key[1] ~= nil then + return key[1] % 10 + end + end + } + rawset(_G, 'some_module', some_module) +end diff --git a/test/entrypoint/srv_ddl/storage_init.lua b/test/entrypoint/srv_ddl/storage_init.lua new file mode 100644 index 00000000..57ea212e --- /dev/null +++ b/test/entrypoint/srv_ddl/storage_init.lua @@ -0,0 +1,193 @@ +local ddl = require('ddl') + +local some_module = { + sharding_func = + function(key) + if key ~= nil and key[1] ~= nil then + return key[1] % 10 + end + end +} +rawset(_G, 'some_module', some_module) + +return function() + local engine = os.getenv('ENGINE') or 'memtx' + local customers_schema = { + engine = engine, + is_local = true, + temporary = false, + format = { + {name = 'id', is_nullable = false, type = 'unsigned'}, + {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, + {name = 'name', is_nullable = false, type = 'string'}, + {name = 'age', is_nullable = false, type = 'number'}, + }, + indexes = { + -- This table is intentionally blank. 
+ }, + } + + local primary_index = { + name = 'id', + type = 'TREE', + unique = true, + parts = { + {path = 'id', is_nullable = false, type = 'unsigned'}, + {path = 'name', is_nullable = false, type = 'string'}, + }, + } + local primary_index_id = { + name = 'id', + type = 'TREE', + unique = true, + parts = { + {path = 'id', is_nullable = false, type = 'unsigned'}, + }, + } + local bucket_id_index = { + name = 'bucket_id', + type = 'TREE', + unique = false, + parts = { + {path = 'bucket_id', is_nullable = false, type = 'unsigned'}, + } + } + local name_index = { + name = 'name', + type = 'TREE', + unique = true, + parts = { + {path = 'name', is_nullable = false, type = 'string'}, + }, + } + local age_index = { + name = 'age', + type = 'TREE', + unique = false, + parts = { + {path = 'age', is_nullable = false, type = 'number'}, + }, + } + local secondary_index = { + name = 'secondary', + type = 'TREE', + unique = false, + parts = { + {path = 'id', is_nullable = false, type = 'unsigned'}, + {path = 'name', is_nullable = false, type = 'string'}, + }, + } + + local three_fields_index = { + name = 'three_fields', + type = 'TREE', + unique = false, + parts = { + {path = 'age', is_nullable = false, type = 'number'}, + {path = 'name', is_nullable = false, type = 'string'}, + {path = 'id', is_nullable = false, type = 'unsigned'}, + }, + } + + local customers_id_schema = table.deepcopy(customers_schema) + customers_id_schema.sharding_key = {'id'} + table.insert(customers_id_schema.indexes, primary_index_id) + table.insert(customers_id_schema.indexes, bucket_id_index) + table.insert(customers_id_schema.indexes, age_index) + + local customers_name_key_schema = table.deepcopy(customers_schema) + customers_name_key_schema.sharding_key = {'name'} + table.insert(customers_name_key_schema.indexes, primary_index) + table.insert(customers_name_key_schema.indexes, bucket_id_index) + + local customers_name_key_uniq_index_schema = table.deepcopy(customers_schema) + 
customers_name_key_uniq_index_schema.sharding_key = {'name'} + table.insert(customers_name_key_uniq_index_schema.indexes, primary_index) + table.insert(customers_name_key_uniq_index_schema.indexes, bucket_id_index) + table.insert(customers_name_key_uniq_index_schema.indexes, name_index) + + local customers_name_key_non_uniq_index_schema = table.deepcopy(customers_schema) + customers_name_key_non_uniq_index_schema.sharding_key = {'name'} + name_index.unique = false + table.insert(customers_name_key_non_uniq_index_schema.indexes, primary_index) + table.insert(customers_name_key_non_uniq_index_schema.indexes, bucket_id_index) + table.insert(customers_name_key_non_uniq_index_schema.indexes, name_index) + + local customers_secondary_idx_name_key_schema = table.deepcopy(customers_schema) + customers_secondary_idx_name_key_schema.sharding_key = {'name'} + table.insert(customers_secondary_idx_name_key_schema.indexes, primary_index_id) + table.insert(customers_secondary_idx_name_key_schema.indexes, secondary_index) + table.insert(customers_secondary_idx_name_key_schema.indexes, bucket_id_index) + + local customers_age_key_schema = table.deepcopy(customers_schema) + customers_age_key_schema.sharding_key = {'age'} + table.insert(customers_age_key_schema.indexes, primary_index) + table.insert(customers_age_key_schema.indexes, bucket_id_index) + + local customers_name_age_key_different_indexes_schema = table.deepcopy(customers_schema) + customers_name_age_key_different_indexes_schema.sharding_key = {'name', 'age'} + table.insert(customers_name_age_key_different_indexes_schema.indexes, primary_index) + table.insert(customers_name_age_key_different_indexes_schema.indexes, bucket_id_index) + table.insert(customers_name_age_key_different_indexes_schema.indexes, age_index) + + local customers_name_age_key_three_fields_index_schema = table.deepcopy(customers_schema) + customers_name_age_key_three_fields_index_schema.sharding_key = {'name', 'age'} + 
table.insert(customers_name_age_key_three_fields_index_schema.indexes, primary_index_id) + table.insert(customers_name_age_key_three_fields_index_schema.indexes, bucket_id_index) + table.insert(customers_name_age_key_three_fields_index_schema.indexes, three_fields_index) + + local customers_id_key_schema = table.deepcopy(customers_schema) + customers_id_key_schema.sharding_key = {'id'} + table.insert(customers_id_key_schema.indexes, primary_index) + table.insert(customers_id_key_schema.indexes, bucket_id_index) + table.insert(customers_id_key_schema.indexes, name_index) + + local customers_body_func_schema = table.deepcopy(customers_id_key_schema) + customers_body_func_schema.sharding_func = { body = 'function(key) return key[1] % 10 end' } + + local customers_G_func_schema = table.deepcopy(customers_id_key_schema) + customers_G_func_schema.sharding_func = 'some_module.sharding_func' + + local customers_empty_sharding_func_schema = table.deepcopy(customers_id_key_schema) + + local customers_vshard_mpcrc32_schema = table.deepcopy(customers_id_key_schema) + customers_vshard_mpcrc32_schema.sharding_func = 'vshard.router.bucket_id_mpcrc32' + + local customers_vshard_strcrc32_schema = table.deepcopy(customers_id_key_schema) + customers_vshard_strcrc32_schema.sharding_func = 'vshard.router.bucket_id_strcrc32' + + local schema = { + spaces = { + customers = customers_id_schema, + customers_name_key = customers_name_key_schema, + customers_name_key_uniq_index = customers_name_key_uniq_index_schema, + customers_name_key_non_uniq_index = customers_name_key_non_uniq_index_schema, + customers_secondary_idx_name_key = customers_secondary_idx_name_key_schema, + customers_age_key = customers_age_key_schema, + customers_name_age_key_different_indexes = customers_name_age_key_different_indexes_schema, + customers_name_age_key_three_fields_index = customers_name_age_key_three_fields_index_schema, + customers_G_func = customers_G_func_schema, + customers_body_func = 
customers_body_func_schema, + customers_empty_sharding_func = customers_empty_sharding_func_schema, + customers_vshard_mpcrc32 = customers_vshard_mpcrc32_schema, + customers_vshard_strcrc32 = customers_vshard_strcrc32_schema, + } + } + + if not box.info.ro then + local ok, err = ddl.set_schema(schema) + if not ok then + error(err) + end + end + + rawset(_G, 'set_sharding_key', function(space_name, sharding_key_def) + local fieldno_sharding_key = 2 + box.space['_ddl_sharding_key']:update(space_name, {{'=', fieldno_sharding_key, sharding_key_def}}) + end) + rawset(_G, 'set_sharding_func', function(space_name, fieldno_sharding_func, sharding_func_def) + local record = {space_name, box.NULL, box.NULL} + record[fieldno_sharding_func] = sharding_func_def + box.space['_ddl_sharding_func']:replace(record) + end) +end diff --git a/test/entrypoint/srv_ddl_reload.lua b/test/entrypoint/srv_ddl_reload.lua deleted file mode 100755 index b6e2300a..00000000 --- a/test/entrypoint/srv_ddl_reload.lua +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') -local ddl = require('ddl') - -local crud_utils = require('crud.common.utils') - -package.preload['customers-storage'] = function() - local customers_module = { - sharding_func_default = function(key) - local id = key[1] - assert(id ~= nil) - - return id % 3000 + 1 - end, - sharding_func_new = function(key) - local id = key[1] - assert(id ~= nil) - - return (id + 42) % 3000 + 1 - end, - } - rawset(_G, 'customers_module', customers_module) - - return { - role_name = 'customers-storage', - init = function() - local engine = os.getenv('ENGINE') or 'memtx' - - local customers_schema_raw = { - engine = engine, - temporary = false, - is_local = false, - format = { - {name = 'id', is_nullable = false, type = 'unsigned'}, - {name = 'bucket_id', is_nullable = 
false, type = 'unsigned'}, - {name = 'name', is_nullable = false, type = 'string'}, - {name = 'age', is_nullable = false, type = 'number'}, - }, - indexes = { - { - name = 'id', - type = 'TREE', - unique = true, - parts = { - {path = 'id', is_nullable = false, type = 'unsigned'}, - }, - }, - { - name = 'bucket_id', - type = 'TREE', - unique = false, - parts = { - {path = 'bucket_id', is_nullable = false, type = 'unsigned'}, - }, - }, - { - name = 'name', - type = 'TREE', - unique = false, - parts = { - {path = 'name', is_nullable = false, type = 'string'}, - }, - }, - { - name = 'age', - type = 'TREE', - unique = false, - parts = { - {path = 'age', is_nullable = false, type = 'number'}, - }, - }, - }, - sharding_key = { 'name' }, - } - - local customers_schema = table.deepcopy(customers_schema_raw) - customers_schema.sharding_key = { 'name' } - - local customers_pk_schema = table.deepcopy(customers_schema_raw) - customers_pk_schema.sharding_key = { 'id' } - customers_pk_schema.sharding_func = 'customers_module.sharding_func_default' - - local schema = { - spaces = { - ['customers'] = customers_schema, - ['customers_pk'] = customers_pk_schema, - } - } - - - rawset(_G, 'reset_to_default_schema', function() - if box.info.ro == true then - return - end - - if box.space['_ddl_sharding_key'] ~= nil then - box.space['_ddl_sharding_key']:truncate() - box.space['_ddl_sharding_key']:insert{'customers', customers_schema.sharding_key} - box.space['_ddl_sharding_key']:insert{'customers_pk', customers_pk_schema.sharding_key} - end - - if box.space['_ddl_sharding_func'] ~= nil then - box.space['_ddl_sharding_func']:truncate() - box.space['_ddl_sharding_func']:insert{'customers_pk', customers_pk_schema.sharding_func, box.NULL} - end - - local _, err = ddl.set_schema(schema) - if err ~= nil then - error(err) - end - end) - - rawset(_G, 'set_sharding_key', function(space_name, sharding_key_def) - if box.info.ro == true then - return - end - - local current_schema, err = 
ddl.get_schema() - if err ~= nil then - error(err) - end - - box.space['_ddl_sharding_key']:replace{space_name, sharding_key_def} - current_schema.spaces[space_name].sharding_key = sharding_key_def - - local _, err = ddl.set_schema(current_schema) - if err ~= nil then - error(err) - end - end) - - rawset(_G, 'set_sharding_func_name', function(space_name, sharding_func_name) - if box.info.ro == true then - return - end - - local current_schema, err = ddl.get_schema() - if err ~= nil then - error(err) - end - - local t = {space_name, sharding_func_name, box.NULL} - box.space['_ddl_sharding_func']:replace(t) - current_schema.spaces[space_name].sharding_func = sharding_func_name - - local _, err = ddl.set_schema(current_schema) - if err ~= nil then - error(err) - end - end) - - rawset(_G, 'set_sharding_func_body', function(space_name, sharding_func_body) - if box.info.ro == true then - return - end - - local current_schema, err = ddl.get_schema() - if err ~= nil then - error(err) - end - - local t = {space_name, box.NULL, sharding_func_body} - box.space['_ddl_sharding_func']:replace(t) - current_schema.spaces[space_name].sharding_func = { body = sharding_func_body } - - local _, err = ddl.set_schema(current_schema) - if err ~= nil then - error(err) - end - end) - - rawset(_G, 'create_new_space', function() - if box.info.ro == true then - return - end - - local new_schema = table.deepcopy(schema) - new_schema.spaces['customers_new'] = table.deepcopy(customers_schema_raw) - new_schema.spaces['customers_new'].sharding_func = { - body = [[ - function(key) - local vshard = require('vshard') - return vshard.router.bucket_id_mpcrc32(key) - end - ]] - } - - local _, err = ddl.set_schema(new_schema) - if err ~= nil then - error(err) - end - end) - end, - } -end - -local roles_reload_allowed = nil -if crud_utils.is_cartridge_hotreload_supported() then - roles_reload_allowed = true -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 
'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'customers-storage', - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - }, - roles_reload_allowed = roles_reload_allowed, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_ddl_reload/cartridge_init.lua b/test/entrypoint/srv_ddl_reload/cartridge_init.lua new file mode 100755 index 00000000..c847d322 --- /dev/null +++ b/test/entrypoint/srv_ddl_reload/cartridge_init.lua @@ -0,0 +1,63 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +local crud_utils = require('crud.common.utils') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. "/?.lua;" +end + +package.preload['customers-storage'] = function() + local customers_module = { + sharding_func_default = function(key) + local id = key[1] + assert(id ~= nil) + + return id % 3000 + 1 + end, + sharding_func_new = function(key) + local id = key[1] + assert(id ~= nil) + + return (id + 42) % 3000 + 1 + end, + } + rawset(_G, 'customers_module', customers_module) + + return { + role_name = 'customers-storage', + init = require('storage_init') + } +end + +local roles_reload_allowed = nil +if crud_utils.is_cartridge_hotreload_supported() then + roles_reload_allowed = true +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'customers-storage', + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + }, + roles_reload_allowed = roles_reload_allowed, +}) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git 
a/test/entrypoint/srv_ddl_reload/router_init.lua b/test/entrypoint/srv_ddl_reload/router_init.lua new file mode 100644 index 00000000..f64cd4c8 --- /dev/null +++ b/test/entrypoint/srv_ddl_reload/router_init.lua @@ -0,0 +1,17 @@ +return function() + local customers_module = { + sharding_func_default = function(key) + local id = key[1] + assert(id ~= nil) + + return id % 3000 + 1 + end, + sharding_func_new = function(key) + local id = key[1] + assert(id ~= nil) + + return (id + 42) % 3000 + 1 + end, + } + rawset(_G, 'customers_module', customers_module) +end diff --git a/test/entrypoint/srv_ddl_reload/storage_init.lua b/test/entrypoint/srv_ddl_reload/storage_init.lua new file mode 100644 index 00000000..203b0767 --- /dev/null +++ b/test/entrypoint/srv_ddl_reload/storage_init.lua @@ -0,0 +1,186 @@ +local ddl = require('ddl') + +local customers_module = { + sharding_func_default = function(key) + local id = key[1] + assert(id ~= nil) + + return id % 3000 + 1 + end, + sharding_func_new = function(key) + local id = key[1] + assert(id ~= nil) + + return (id + 42) % 3000 + 1 + end, +} +rawset(_G, 'customers_module', customers_module) + +return function() + local engine = os.getenv('ENGINE') or 'memtx' + + local customers_schema_raw = { + engine = engine, + temporary = false, + is_local = false, + format = { + {name = 'id', is_nullable = false, type = 'unsigned'}, + {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, + {name = 'name', is_nullable = false, type = 'string'}, + {name = 'age', is_nullable = false, type = 'number'}, + }, + indexes = { + { + name = 'id', + type = 'TREE', + unique = true, + parts = { + {path = 'id', is_nullable = false, type = 'unsigned'}, + }, + }, + { + name = 'bucket_id', + type = 'TREE', + unique = false, + parts = { + {path = 'bucket_id', is_nullable = false, type = 'unsigned'}, + }, + }, + { + name = 'name', + type = 'TREE', + unique = false, + parts = { + {path = 'name', is_nullable = false, type = 'string'}, + }, + }, + { + name 
= 'age', + type = 'TREE', + unique = false, + parts = { + {path = 'age', is_nullable = false, type = 'number'}, + }, + }, + }, + sharding_key = { 'name' }, + } + + local customers_schema = table.deepcopy(customers_schema_raw) + customers_schema.sharding_key = { 'name' } + + local customers_pk_schema = table.deepcopy(customers_schema_raw) + customers_pk_schema.sharding_key = { 'id' } + customers_pk_schema.sharding_func = 'customers_module.sharding_func_default' + + local schema = { + spaces = { + ['customers'] = customers_schema, + ['customers_pk'] = customers_pk_schema, + } + } + + + rawset(_G, 'reset_to_default_schema', function() + if box.info.ro == true then + return + end + + if box.space['_ddl_sharding_key'] ~= nil then + box.space['_ddl_sharding_key']:truncate() + box.space['_ddl_sharding_key']:insert{'customers', customers_schema.sharding_key} + box.space['_ddl_sharding_key']:insert{'customers_pk', customers_pk_schema.sharding_key} + end + + if box.space['_ddl_sharding_func'] ~= nil then + box.space['_ddl_sharding_func']:truncate() + box.space['_ddl_sharding_func']:insert{'customers_pk', customers_pk_schema.sharding_func, box.NULL} + end + + local _, err = ddl.set_schema(schema) + if err ~= nil then + error(err) + end + end) + + rawset(_G, 'set_sharding_key', function(space_name, sharding_key_def) + if box.info.ro == true then + return + end + + local current_schema, err = ddl.get_schema() + if err ~= nil then + error(err) + end + + box.space['_ddl_sharding_key']:replace{space_name, sharding_key_def} + current_schema.spaces[space_name].sharding_key = sharding_key_def + + local _, err = ddl.set_schema(current_schema) + if err ~= nil then + error(err) + end + end) + + rawset(_G, 'set_sharding_func_name', function(space_name, sharding_func_name) + if box.info.ro == true then + return + end + + local current_schema, err = ddl.get_schema() + if err ~= nil then + error(err) + end + + local t = {space_name, sharding_func_name, box.NULL} + 
box.space['_ddl_sharding_func']:replace(t) + current_schema.spaces[space_name].sharding_func = sharding_func_name + + local _, err = ddl.set_schema(current_schema) + if err ~= nil then + error(err) + end + end) + + rawset(_G, 'set_sharding_func_body', function(space_name, sharding_func_body) + if box.info.ro == true then + return + end + + local current_schema, err = ddl.get_schema() + if err ~= nil then + error(err) + end + + local t = {space_name, box.NULL, sharding_func_body} + box.space['_ddl_sharding_func']:replace(t) + current_schema.spaces[space_name].sharding_func = { body = sharding_func_body } + + local _, err = ddl.set_schema(current_schema) + if err ~= nil then + error(err) + end + end) + + rawset(_G, 'create_new_space', function() + if box.info.ro == true then + return + end + + local new_schema = table.deepcopy(schema) + new_schema.spaces['customers_new'] = table.deepcopy(customers_schema_raw) + new_schema.spaces['customers_new'].sharding_func = { + body = [[ + function(key) + local vshard = require('vshard') + return vshard.router.bucket_id_mpcrc32(key) + end + ]] + } + + local _, err = ddl.set_schema(new_schema) + if err ~= nil then + error(err) + end + end) +end diff --git a/test/entrypoint/srv_migration.lua b/test/entrypoint/srv_migration/cartridge_init.lua similarity index 52% rename from test/entrypoint/srv_migration.lua rename to test/entrypoint/srv_migration/cartridge_init.lua index b72e9c69..37c18409 100755 --- a/test/entrypoint/srv_migration.lua +++ b/test/entrypoint/srv_migration/cartridge_init.lua @@ -7,23 +7,16 @@ local log = require('log') local errors = require('errors') local cartridge = require('cartridge') +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. 
"/?.lua;" +end + package.preload['customers-storage'] = function() return { role_name = 'customers-storage', - init = function(opts) - if opts.is_master then - box.schema.space.create('customers') - - box.space['customers']:format{ - {name = 'id', is_nullable = false, type = 'unsigned'}, - {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, - {name = 'sharding_key', is_nullable = false, type = 'unsigned'}, - } - - box.space['customers']:create_index('pk', {parts = { 'id' }}) - box.space['customers']:create_index('bucket_id', {parts = { 'bucket_id' }}) - end - end, + init = require('storage_init') } end diff --git a/test/entrypoint/srv_migration/storage_init.lua b/test/entrypoint/srv_migration/storage_init.lua new file mode 100644 index 00000000..71d94d71 --- /dev/null +++ b/test/entrypoint/srv_migration/storage_init.lua @@ -0,0 +1,15 @@ +return function() + if box.info.ro == true then + return + end + box.schema.space.create('customers') + + box.space['customers']:format{ + {name = 'id', is_nullable = false, type = 'unsigned'}, + {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, + {name = 'sharding_key', is_nullable = false, type = 'unsigned'}, + } + + box.space['customers']:create_index('pk', {parts = { 'id' }}) + box.space['customers']:create_index('bucket_id', {parts = { 'bucket_id' }}) +end diff --git a/test/entrypoint/srv_not_initialized.lua b/test/entrypoint/srv_not_initialized/cartridge_init.lua similarity index 51% rename from test/entrypoint/srv_not_initialized.lua rename to test/entrypoint/srv_not_initialized/cartridge_init.lua index 92ee4a11..7d82a7b1 100755 --- a/test/entrypoint/srv_not_initialized.lua +++ b/test/entrypoint/srv_not_initialized/cartridge_init.lua @@ -7,29 +7,16 @@ local log = require('log') local errors = require('errors') local cartridge = require('cartridge') +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. 
"/?.lua;" +end + package.preload['customers-storage'] = function() return { role_name = 'customers-storage', - init = function() - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'age', type = 'number'}, - }, - if_not_exists = true, - }) - customers_space:create_index('id', { - parts = { {field = 'id' } }, - if_not_exists = true, - }) - customers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id' } }, - unique = false, - if_not_exists = true, - }) - end, + init = require('storage_init'), dependencies = {'cartridge.roles.vshard-storage'}, } end diff --git a/test/entrypoint/srv_not_initialized/storage_init.lua b/test/entrypoint/srv_not_initialized/storage_init.lua new file mode 100644 index 00000000..c4a74517 --- /dev/null +++ b/test/entrypoint/srv_not_initialized/storage_init.lua @@ -0,0 +1,24 @@ +return function() + if box.info.ro == true then + return + end + + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'age', type = 'number'}, + }, + if_not_exists = true, + }) + customers_space:create_index('id', { + parts = { {field = 'id' } }, + if_not_exists = true, + }) + customers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id' } }, + unique = false, + if_not_exists = true, + }) +end diff --git a/test/entrypoint/srv_read_calls_strategies.lua b/test/entrypoint/srv_read_calls_strategies.lua deleted file mode 100755 index 29e1703f..00000000 --- a/test/entrypoint/srv_read_calls_strategies.lua +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') - 
-package.preload['customers-storage'] = function() - return { - role_name = 'customers-storage', - init = function() - local engine = os.getenv('ENGINE') or 'memtx' - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'age', type = 'number'}, - }, - if_not_exists = true, - engine = engine, - }) - customers_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - end, - } -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'customers-storage', - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - }, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -rawset(_G, 'vshard_calls', {}) - -rawset(_G, 'clear_vshard_calls', function() - table.clear(_G.vshard_calls) -end) - -rawset(_G, 'patch_vshard_calls', function(vshard_call_names) - local vshard = require('vshard') - - local replicasets = vshard.router.routeall() - - local _, replicaset = next(replicasets) - local replicaset_mt = getmetatable(replicaset) - - for _, vshard_call_name in ipairs(vshard_call_names) do - local old_func = replicaset_mt.__index[vshard_call_name] - assert(old_func ~= nil, vshard_call_name) - - replicaset_mt.__index[vshard_call_name] = function(...) - local func_name = select(2, ...) - if not string.startswith(func_name, 'vshard.') or func_name == 'vshard.storage.call' then - table.insert(_G.vshard_calls, vshard_call_name) - end - return old_func(...) 
- end - end -end) - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_read_calls_strategies/all_init.lua b/test/entrypoint/srv_read_calls_strategies/all_init.lua new file mode 100644 index 00000000..1827214d --- /dev/null +++ b/test/entrypoint/srv_read_calls_strategies/all_init.lua @@ -0,0 +1,29 @@ +return function() + rawset(_G, 'vshard_calls', {}) + + rawset(_G, 'clear_vshard_calls', function() + table.clear(_G.vshard_calls) + end) + + rawset(_G, 'patch_vshard_calls', function(vshard_call_names) + local vshard = require('vshard') + + local replicasets = vshard.router.routeall() + + local _, replicaset = next(replicasets) + local replicaset_mt = getmetatable(replicaset) + + for _, vshard_call_name in ipairs(vshard_call_names) do + local old_func = replicaset_mt.__index[vshard_call_name] + assert(old_func ~= nil, vshard_call_name) + + replicaset_mt.__index[vshard_call_name] = function(...) + local func_name = select(2, ...) + if not string.startswith(func_name, 'vshard.') or func_name == 'vshard.storage.call' then + table.insert(_G.vshard_calls, vshard_call_name) + end + return old_func(...) + end + end + end) +end diff --git a/test/entrypoint/srv_read_calls_strategies/cartridge_init.lua b/test/entrypoint/srv_read_calls_strategies/cartridge_init.lua new file mode 100755 index 00000000..96a94860 --- /dev/null +++ b/test/entrypoint/srv_read_calls_strategies/cartridge_init.lua @@ -0,0 +1,41 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. 
"/?.lua;" +end + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = require('storage_init'), + } +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'customers-storage', + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + }, +}) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +require('all_init')() + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_read_calls_strategies/storage_init.lua b/test/entrypoint/srv_read_calls_strategies/storage_init.lua new file mode 100644 index 00000000..0973bb7d --- /dev/null +++ b/test/entrypoint/srv_read_calls_strategies/storage_init.lua @@ -0,0 +1,21 @@ +return function() + if box.info.ro == true then + return + end + + local engine = os.getenv('ENGINE') or 'memtx' + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'age', type = 'number'}, + }, + if_not_exists = true, + engine = engine, + }) + customers_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) +end diff --git a/test/entrypoint/srv_reload.lua b/test/entrypoint/srv_reload/cartridge_init.lua similarity index 100% rename from test/entrypoint/srv_reload.lua rename to test/entrypoint/srv_reload/cartridge_init.lua diff --git a/test/entrypoint/srv_say_hi.lua b/test/entrypoint/srv_say_hi.lua deleted file mode 100755 index b40c58bd..00000000 --- a/test/entrypoint/srv_say_hi.lua +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') -local membership = require('membership') -local fiber = 
require('fiber') - -local ok, err - -ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'cartridge.roles.crud-storage', - 'cartridge.roles.crud-router', - }, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - - -rawset(_G, 'say_hi_politely', function (to_name) - to_name = to_name or "handsome" - local my_alias = membership.myself().payload.alias - return string.format("HI, %s! I am %s", to_name, my_alias) -end) - -rawset(_G, 'say_hi_sleepily', function (time_to_sleep) - if time_to_sleep ~= nil then - fiber.sleep(time_to_sleep) - end - - return "HI" -end) - -rawset(_G, 'vshard_calls', {}) - -rawset(_G, 'clear_vshard_calls', function() - table.clear(_G.vshard_calls) -end) - -rawset(_G, 'patch_vshard_calls', function(vshard_call_names) - local vshard = require('vshard') - - local replicasets = vshard.router.routeall() - - local _, replicaset = next(replicasets) - local replicaset_mt = getmetatable(replicaset) - - for _, vshard_call_name in ipairs(vshard_call_names) do - local old_func = replicaset_mt.__index[vshard_call_name] - assert(old_func ~= nil, vshard_call_name) - - replicaset_mt.__index[vshard_call_name] = function(...) - local func_name = select(2, ...) - if not string.startswith(func_name, 'vshard.') or func_name == 'vshard.storage.call' then - table.insert(_G.vshard_calls, vshard_call_name) - end - return old_func(...) - end - end -end) - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_say_hi/all_init.lua b/test/entrypoint/srv_say_hi/all_init.lua new file mode 100644 index 00000000..dee0c40d --- /dev/null +++ b/test/entrypoint/srv_say_hi/all_init.lua @@ -0,0 +1,45 @@ +local fiber = require('fiber') + +return function() + rawset(_G, 'say_hi_politely', function (to_name) + to_name = to_name or "handsome" + local my_alias = box.info.id + return string.format("HI, %s! 
I am %s", to_name, my_alias) + end) + + rawset(_G, 'say_hi_sleepily', function (time_to_sleep) + if time_to_sleep ~= nil then + fiber.sleep(time_to_sleep) + end + + return "HI" + end) + + rawset(_G, 'vshard_calls', {}) + + rawset(_G, 'clear_vshard_calls', function() + table.clear(_G.vshard_calls) + end) + + rawset(_G, 'patch_vshard_calls', function(vshard_call_names) + local vshard = require('vshard') + + local replicasets = vshard.router.routeall() + + local _, replicaset = next(replicasets) + local replicaset_mt = getmetatable(replicaset) + + for _, vshard_call_name in ipairs(vshard_call_names) do + local old_func = replicaset_mt.__index[vshard_call_name] + assert(old_func ~= nil, vshard_call_name) + + replicaset_mt.__index[vshard_call_name] = function(...) + local func_name = select(2, ...) + if not string.startswith(func_name, 'vshard.') or func_name == 'vshard.storage.call' then + table.insert(_G.vshard_calls, vshard_call_name) + end + return old_func(...) + end + end + end) +end diff --git a/test/entrypoint/srv_shadowing.lua b/test/entrypoint/srv_say_hi/cartridge_init.lua similarity index 61% rename from test/entrypoint/srv_shadowing.lua rename to test/entrypoint/srv_say_hi/cartridge_init.lua index d1bb1fdb..1890e017 100755 --- a/test/entrypoint/srv_shadowing.lua +++ b/test/entrypoint/srv_say_hi/cartridge_init.lua @@ -6,7 +6,12 @@ _G.is_initialized = function() return false end local log = require('log') local errors = require('errors') local cartridge = require('cartridge') -local crud = require('crud') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. 
"/?.lua;" +end local ok, err @@ -25,17 +30,6 @@ if not ok then os.exit(1) end -rawset(_G, "global_func", function() return "global" end) -rawset(_G, "common_func", function() return "common-global" end) - -ok, err = crud.register({ - registered_func = function() return "registered" end, - common_func = function() return "common-registered" end, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end +require('all_init')() _G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_select.lua b/test/entrypoint/srv_select.lua deleted file mode 100755 index d006014e..00000000 --- a/test/entrypoint/srv_select.lua +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') -local crud_utils = require('crud.common.utils') - -package.preload['customers-storage'] = function() - local engine = os.getenv('ENGINE') or 'memtx' - return { - role_name = 'customers-storage', - init = function() - box.schema.space.create('no_index_space', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - }, - if_not_exists = true, - engine = engine, - }) - - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'last_name', type = 'string'}, - {name = 'age', type = 'number'}, - {name = 'city', type = 'string'}, - }, - if_not_exists = true, - engine = engine, - }) - -- primary index - customers_space:create_index('id_index', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - customers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - customers_space:create_index('age_index', { - parts = { {field = 
'age'} }, - unique = false, - if_not_exists = true, - }) - -- indexes with same names as fields - customers_space:create_index('age', { - parts = { {field = 'age'} }, - unique = false, - if_not_exists = true, - }) - customers_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - customers_space:create_index('full_name', { - parts = { - {field = 'name', collation = 'unicode_ci'}, - {field = 'last_name', collation = 'unicode_ci'} , - }, - unique = false, - if_not_exists = true, - }) - - if crud_utils.tarantool_supports_uuids() then - local goods_space = box.schema.space.create('goods', { - format = { - {name = 'uuid', type = 'uuid'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'category_id', type = 'uuid'}, - }, - if_not_exists = true, - engine = engine, - }) - --primary index - goods_space:create_index('uuid', { - parts = { {field = 'uuid'} }, - if_not_exists = true, - }) - goods_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - end - - local coord_space = box.schema.space.create('coord', { - format = { - {name = 'x', type = 'unsigned'}, - {name = 'y', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - }, - if_not_exists = true, - engine = engine, - }) - -- primary index - coord_space:create_index('primary', { - parts = { - {field = 'x'}, - {field = 'y'}, - }, - if_not_exists = true, - }) - coord_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - - local book_translation = box.schema.space.create('book_translation', { - format = { - { name = 'id', type = 'unsigned' }, - { name = 'bucket_id', type = 'unsigned' }, - { name = 'language', type = 'string' }, - { name = 'edition', type = 'integer' }, - { name = 'translator', type = 'string' }, - { name = 'comments', type = 'string', is_nullable = true }, - }, - if_not_exists = true, - }) - - 
book_translation:create_index('id', { - parts = { 'id', 'language', 'edition' }, - if_not_exists = true, - }) - - book_translation:create_index('bucket_id', { - parts = { 'bucket_id' }, - unique = false, - if_not_exists = true, - }) - - local developers_space = box.schema.space.create('developers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'last_name', type = 'string'}, - {name = 'age', type = 'number'}, - {name = 'additional', type = 'any'}, - }, - if_not_exists = true, - engine = engine, - }) - - -- primary index - developers_space:create_index('id_index', { - parts = { 'id' }, - if_not_exists = true, - }) - - developers_space:create_index('bucket_id', { - parts = { 'bucket_id' }, - unique = false, - if_not_exists = true, - }) - - if crud_utils.tarantool_supports_jsonpath_indexes() then - local cars_space = box.schema.space.create('cars', { - format = { - {name = 'id', type = 'map'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'age', type = 'number'}, - {name = 'manufacturer', type = 'string'}, - {name = 'data', type = 'map'} - }, - if_not_exists = true, - engine = engine, - }) - - -- primary index - cars_space:create_index('id_ind', { - parts = { - {1, 'unsigned', path = 'car_id.signed'}, - }, - if_not_exists = true, - }) - - cars_space:create_index('bucket_id', { - parts = { 'bucket_id' }, - unique = false, - if_not_exists = true, - }) - - cars_space:create_index('data_index', { - parts = { - {5, 'str', path = 'car.color'}, - {5, 'str', path = 'car.model'}, - }, - unique = false, - if_not_exists = true, - }) - end - end, - } -end - -local box_opts = { - readahead = 10 * 1024 * 1024, -} -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - 'customers-storage', - }}, - box_opts -) - -if 
not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_select/cartridge_init.lua b/test/entrypoint/srv_select/cartridge_init.lua new file mode 100755 index 00000000..03a2c438 --- /dev/null +++ b/test/entrypoint/srv_select/cartridge_init.lua @@ -0,0 +1,43 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. "/?.lua;" +end + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = require('storage_init') + } +end + +local box_opts = { + readahead = 10 * 1024 * 1024, +} +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + 'customers-storage', + }}, + box_opts +) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_select/storage_init.lua b/test/entrypoint/srv_select/storage_init.lua new file mode 100644 index 00000000..5bc49aa4 --- /dev/null +++ b/test/entrypoint/srv_select/storage_init.lua @@ -0,0 +1,195 @@ +local crud_utils = require('crud.common.utils') + +return function() + if box.info.ro == true then + return + end + + local engine = os.getenv('ENGINE') or 'memtx' + box.schema.space.create('no_index_space', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + }, + if_not_exists = true, + engine = engine, + }) + + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 
'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'last_name', type = 'string'}, + {name = 'age', type = 'number'}, + {name = 'city', type = 'string'}, + }, + if_not_exists = true, + engine = engine, + }) + -- primary index + customers_space:create_index('id_index', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + customers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + customers_space:create_index('age_index', { + parts = { {field = 'age'} }, + unique = false, + if_not_exists = true, + }) + -- indexes with same names as fields + customers_space:create_index('age', { + parts = { {field = 'age'} }, + unique = false, + if_not_exists = true, + }) + customers_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + customers_space:create_index('full_name', { + parts = { + {field = 'name', collation = 'unicode_ci'}, + {field = 'last_name', collation = 'unicode_ci'} , + }, + unique = false, + if_not_exists = true, + }) + + if crud_utils.tarantool_supports_uuids() then + local goods_space = box.schema.space.create('goods', { + format = { + {name = 'uuid', type = 'uuid'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'category_id', type = 'uuid'}, + }, + if_not_exists = true, + engine = engine, + }) + --primary index + goods_space:create_index('uuid', { + parts = { {field = 'uuid'} }, + if_not_exists = true, + }) + goods_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + end + + local coord_space = box.schema.space.create('coord', { + format = { + {name = 'x', type = 'unsigned'}, + {name = 'y', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + }, + if_not_exists = true, + engine = engine, + }) + -- primary index + coord_space:create_index('primary', { + parts = { + {field = 'x'}, + {field = 'y'}, + }, + 
if_not_exists = true, + }) + coord_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + + local book_translation = box.schema.space.create('book_translation', { + format = { + { name = 'id', type = 'unsigned' }, + { name = 'bucket_id', type = 'unsigned' }, + { name = 'language', type = 'string' }, + { name = 'edition', type = 'integer' }, + { name = 'translator', type = 'string' }, + { name = 'comments', type = 'string', is_nullable = true }, + }, + if_not_exists = true, + }) + + book_translation:create_index('id', { + parts = { 'id', 'language', 'edition' }, + if_not_exists = true, + }) + + book_translation:create_index('bucket_id', { + parts = { 'bucket_id' }, + unique = false, + if_not_exists = true, + }) + + local developers_space = box.schema.space.create('developers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'last_name', type = 'string'}, + {name = 'age', type = 'number'}, + {name = 'additional', type = 'any'}, + }, + if_not_exists = true, + engine = engine, + }) + + -- primary index + developers_space:create_index('id_index', { + parts = { 'id' }, + if_not_exists = true, + }) + + developers_space:create_index('bucket_id', { + parts = { 'bucket_id' }, + unique = false, + if_not_exists = true, + }) + + if crud_utils.tarantool_supports_jsonpath_indexes() then + local cars_space = box.schema.space.create('cars', { + format = { + {name = 'id', type = 'map'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'age', type = 'number'}, + {name = 'manufacturer', type = 'string'}, + {name = 'data', type = 'map'} + }, + if_not_exists = true, + engine = engine, + }) + + -- primary index + cars_space:create_index('id_ind', { + parts = { + {1, 'unsigned', path = 'car_id.signed'}, + }, + if_not_exists = true, + }) + + cars_space:create_index('bucket_id', { + parts = { 'bucket_id' }, + unique = false, + 
if_not_exists = true, + }) + + cars_space:create_index('data_index', { + parts = { + {5, 'str', path = 'car.color'}, + {5, 'str', path = 'car.model'}, + }, + unique = false, + if_not_exists = true, + }) + end +end diff --git a/test/entrypoint/srv_simple_operations.lua b/test/entrypoint/srv_simple_operations.lua deleted file mode 100755 index a2a75af5..00000000 --- a/test/entrypoint/srv_simple_operations.lua +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') - -package.preload['customers-storage'] = function() - return { - role_name = 'customers-storage', - init = function() - local engine = os.getenv('ENGINE') or 'memtx' - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'age', type = 'number'}, - }, - if_not_exists = true, - engine = engine, - }) - customers_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - customers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - - local developers_space = box.schema.space.create('developers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - }, - if_not_exists = true, - engine = engine, - }) - developers_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - developers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - - rawset(_G, 'add_extra_field', function(space_name, field_name) - local space = box.space[space_name] - local new_format = space:format() - table.insert(new_format, {name = field_name, type = 'any', is_nullable = true}) - 
space:format(new_format) - end) - - rawset(_G, 'create_space_for_gh_326_cases', function() - local countries_space = box.schema.space.create('countries', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'population', type = 'unsigned'}, - }, - if_not_exists = true, - engine = os.getenv('ENGINE') or 'memtx', - }) - countries_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - countries_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - end) - - rawset(_G, 'drop_space_for_gh_326_cases', function() - box.space['countries']:drop() - end) - - -- Space with huge amount of nullable fields - -- an object that inserted in such space should get - -- explicit nulls in absence fields otherwise - -- Tarantool serializers could consider such object as map (not array). - local tags_space = box.schema.space.create('tags', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'is_red', type = 'boolean', is_nullable = true}, - {name = 'is_green', type = 'boolean', is_nullable = true}, - {name = 'is_blue', type = 'boolean', is_nullable = true}, - {name = 'is_yellow', type = 'boolean', is_nullable = true}, - {name = 'is_sweet', type = 'boolean', is_nullable = true}, - {name = 'is_dirty', type = 'boolean', is_nullable = true}, - {name = 'is_long', type = 'boolean', is_nullable = true}, - {name = 'is_short', type = 'boolean', is_nullable = true}, - {name = 'is_useful', type = 'boolean', is_nullable = true}, - {name = 'is_correct', type = 'boolean', is_nullable = true}, - }, - if_not_exists = true, - engine = engine, - }) - - tags_space:create_index('id', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - tags_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - - 
local sequence_space = box.schema.space.create('notebook', { - format = { - {name = 'local_id', type = 'unsigned', is_nullable = false}, - {name = 'bucket_id', type = 'unsigned', is_nullable = false}, - {name = 'record', type = 'string', is_nullable = false}, - }, - if_not_exists = true, - engine = engine, - }) - - box.schema.sequence.create('local_id', {if_not_exists = true}) - - sequence_space:create_index('local_id', { - parts = { {field = 'local_id'} }, - unique = true, - if_not_exists = true, - sequence = 'local_id', - }) - sequence_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - end, - } -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'customers-storage', - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - }, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_simple_operations/cartridge_init.lua b/test/entrypoint/srv_simple_operations/cartridge_init.lua new file mode 100755 index 00000000..c4579622 --- /dev/null +++ b/test/entrypoint/srv_simple_operations/cartridge_init.lua @@ -0,0 +1,39 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. 
"/?.lua;" +end + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = require('storage_init') + } +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'customers-storage', + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + }, +}) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_simple_operations/storage_init.lua b/test/entrypoint/srv_simple_operations/storage_init.lua new file mode 100644 index 00000000..af90840b --- /dev/null +++ b/test/entrypoint/srv_simple_operations/storage_init.lua @@ -0,0 +1,134 @@ +return function() + if box.info.ro == true then + return + end + + local engine = os.getenv('ENGINE') or 'memtx' + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'age', type = 'number'}, + }, + if_not_exists = true, + engine = engine, + }) + customers_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + customers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + + local developers_space = box.schema.space.create('developers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + }, + if_not_exists = true, + engine = engine, + }) + developers_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + developers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + + rawset(_G, 'add_extra_field', function(space_name, field_name) + local space = box.space[space_name] + local new_format = space:format() + 
table.insert(new_format, {name = field_name, type = 'any', is_nullable = true}) + space:format(new_format) + end) + + rawset(_G, 'create_space_for_gh_326_cases', function() + local countries_space = box.schema.space.create('countries', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'population', type = 'unsigned'}, + }, + if_not_exists = true, + engine = os.getenv('ENGINE') or 'memtx', + }) + countries_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + countries_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + end) + + rawset(_G, 'drop_space_for_gh_326_cases', function() + box.space['countries']:drop() + end) + + -- Space with huge amount of nullable fields + -- an object that inserted in such space should get + -- explicit nulls in absence fields otherwise + -- Tarantool serializers could consider such object as map (not array). 
+ local tags_space = box.schema.space.create('tags', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'is_red', type = 'boolean', is_nullable = true}, + {name = 'is_green', type = 'boolean', is_nullable = true}, + {name = 'is_blue', type = 'boolean', is_nullable = true}, + {name = 'is_yellow', type = 'boolean', is_nullable = true}, + {name = 'is_sweet', type = 'boolean', is_nullable = true}, + {name = 'is_dirty', type = 'boolean', is_nullable = true}, + {name = 'is_long', type = 'boolean', is_nullable = true}, + {name = 'is_short', type = 'boolean', is_nullable = true}, + {name = 'is_useful', type = 'boolean', is_nullable = true}, + {name = 'is_correct', type = 'boolean', is_nullable = true}, + }, + if_not_exists = true, + engine = engine, + }) + + tags_space:create_index('id', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + tags_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + + local sequence_space = box.schema.space.create('notebook', { + format = { + {name = 'local_id', type = 'unsigned', is_nullable = false}, + {name = 'bucket_id', type = 'unsigned', is_nullable = false}, + {name = 'record', type = 'string', is_nullable = false}, + }, + if_not_exists = true, + engine = engine, + }) + + box.schema.sequence.create('local_id', {if_not_exists = true}) + + sequence_space:create_index('local_id', { + parts = { {field = 'local_id'} }, + unique = true, + if_not_exists = true, + sequence = 'local_id', + }) + sequence_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) +end diff --git a/test/entrypoint/srv_stats.lua b/test/entrypoint/srv_stats.lua deleted file mode 100755 index c23d57cf..00000000 --- a/test/entrypoint/srv_stats.lua +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - 
-local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') - -local crud_utils = require('crud.common.utils') - -package.preload['customers-storage'] = function() - local engine = os.getenv('ENGINE') or 'memtx' - return { - role_name = 'customers-storage', - init = function() - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'name', type = 'string'}, - {name = 'last_name', type = 'string'}, - {name = 'age', type = 'number'}, - {name = 'city', type = 'string'}, - }, - if_not_exists = true, - engine = engine, - id = 542, - }) - -- primary index - customers_space:create_index('id_index', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - customers_space:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - unique = false, - if_not_exists = true, - }) - customers_space:create_index('age_index', { - parts = { {field = 'age'} }, - unique = false, - if_not_exists = true, - }) - end, - } -end - -local roles_reload_allowed = nil -if crud_utils.is_cartridge_hotreload_supported() then - roles_reload_allowed = true -end - -local is_metrics = pcall(require, 'metrics') -local roles = { - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - 'customers-storage', -} -if is_metrics then - table.insert(roles, 'cartridge.roles.metrics') -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = roles, - roles_reload_allowed = roles_reload_allowed, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_stats/cartridge_init.lua b/test/entrypoint/srv_stats/cartridge_init.lua new file mode 100755 index 00000000..3cdd3ccf --- /dev/null +++ b/test/entrypoint/srv_stats/cartridge_init.lua @@ -0,0 +1,53 @@ 
+#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +local crud_utils = require('crud.common.utils') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. "/?.lua;" +end + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = require('storage_init'), + } +end + +local roles_reload_allowed = nil +if crud_utils.is_cartridge_hotreload_supported() then + roles_reload_allowed = true +end + +local is_metrics = pcall(require, 'metrics') +local roles = { + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + 'customers-storage', +} +if is_metrics then + table.insert(roles, 'cartridge.roles.metrics') +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = roles, + roles_reload_allowed = roles_reload_allowed, +}) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_stats/storage_init.lua b/test/entrypoint/srv_stats/storage_init.lua new file mode 100644 index 00000000..d64d5fc1 --- /dev/null +++ b/test/entrypoint/srv_stats/storage_init.lua @@ -0,0 +1,35 @@ +return function() + if box.info.ro == true then + return + end + + local engine = os.getenv('ENGINE') or 'memtx' + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'name', type = 'string'}, + {name = 'last_name', type = 'string'}, + {name = 'age', type = 'number'}, + {name = 'city', type = 'string'}, + }, + if_not_exists = true, + engine = engine, + id = 542, + }) + -- primary index + customers_space:create_index('id_index', { + parts 
= { {field = 'id'} }, + if_not_exists = true, + }) + customers_space:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + unique = false, + if_not_exists = true, + }) + customers_space:create_index('age_index', { + parts = { {field = 'age'} }, + unique = false, + if_not_exists = true, + }) +end diff --git a/test/entrypoint/srv_update_schema.lua b/test/entrypoint/srv_update_schema.lua deleted file mode 100755 index 4fe8f471..00000000 --- a/test/entrypoint/srv_update_schema.lua +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env tarantool - -require('strict').on() -_G.is_initialized = function() return false end - -local log = require('log') -local errors = require('errors') -local cartridge = require('cartridge') - -package.preload['customers-storage'] = function() - local engine = os.getenv('ENGINE') or 'memtx' - return { - role_name = 'customers-storage', - init = function() - rawset(_G, 'create_space', function() - local customers_space = box.schema.space.create('customers', { - format = { - {name = 'id', type = 'unsigned'}, - {name = 'bucket_id', type = 'unsigned'}, - {name = 'value', type = 'string'}, - {name = 'number', type = 'integer', is_nullable = true}, - }, - if_not_exists = true, - engine = engine, - }) - - -- primary index - customers_space:create_index('id_index', { - parts = { {field = 'id'} }, - if_not_exists = true, - }) - end) - - rawset(_G, 'create_bucket_id_index', function() - box.space.customers:create_index('bucket_id', { - parts = { {field = 'bucket_id'} }, - if_not_exists = true, - unique = false, - }) - end) - - rawset(_G, 'set_value_type_to_unsigned', function() - local new_format = {} - - for _, field_format in ipairs(box.space.customers:format()) do - if field_format.name == 'value' then - field_format.type = 'unsigned' - end - table.insert(new_format, field_format) - end - - box.space.customers:format(new_format) - end) - - rawset(_G, 'add_extra_field', function() - local new_format = box.space.customers:format() - 
table.insert(new_format, {name = 'extra', type = 'string', is_nullable = true}) - box.space.customers:format(new_format) - end) - - rawset(_G, 'add_value_index', function() - box.space.customers:create_index('value_index', { - parts = { {field = 'value'} }, - if_not_exists = true, - unique = false, - }) - end) - - rawset(_G, 'create_number_value_index', function() - box.space.customers:create_index('number_value_index', { - parts = { {field = 'number'}, {field = 'value'} }, - if_not_exists = true, - unique = false, - }) - end) - - rawset(_G, 'alter_number_value_index', function() - box.space.customers.index.number_value_index:alter({ - parts = { {field = 'value'}, {field = 'number'} }, - }) - end) - end, - } -end - -local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { - advertise_uri = 'localhost:3301', - http_port = 8081, - bucket_count = 3000, - roles = { - 'cartridge.roles.crud-router', - 'cartridge.roles.crud-storage', - 'customers-storage', - }, -}) - -if not ok then - log.error('%s', err) - os.exit(1) -end - -_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_update_schema/cartridge_init.lua b/test/entrypoint/srv_update_schema/cartridge_init.lua new file mode 100755 index 00000000..ba9b4274 --- /dev/null +++ b/test/entrypoint/srv_update_schema/cartridge_init.lua @@ -0,0 +1,39 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +if package.setsearchroot ~= nil then + package.setsearchroot() +else + package.path = package.path .. debug.sourcedir() .. 
"/?.lua;" +end + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = require('storage_init'), + } +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + 'customers-storage', + }, +}) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/entrypoint/srv_update_schema/storage_init.lua b/test/entrypoint/srv_update_schema/storage_init.lua new file mode 100644 index 00000000..0c81fa88 --- /dev/null +++ b/test/entrypoint/srv_update_schema/storage_init.lua @@ -0,0 +1,74 @@ +return function() + if box.info.ro == true then + return + end + + local engine = os.getenv('ENGINE') or 'memtx' + rawset(_G, 'create_space', function() + local customers_space = box.schema.space.create('customers', { + format = { + {name = 'id', type = 'unsigned'}, + {name = 'bucket_id', type = 'unsigned'}, + {name = 'value', type = 'string'}, + {name = 'number', type = 'integer', is_nullable = true}, + }, + if_not_exists = true, + engine = engine, + }) + + -- primary index + customers_space:create_index('id_index', { + parts = { {field = 'id'} }, + if_not_exists = true, + }) + end) + + rawset(_G, 'create_bucket_id_index', function() + box.space.customers:create_index('bucket_id', { + parts = { {field = 'bucket_id'} }, + if_not_exists = true, + unique = false, + }) + end) + + rawset(_G, 'set_value_type_to_unsigned', function() + local new_format = {} + + for _, field_format in ipairs(box.space.customers:format()) do + if field_format.name == 'value' then + field_format.type = 'unsigned' + end + table.insert(new_format, field_format) + end + + box.space.customers:format(new_format) + end) + + rawset(_G, 'add_extra_field', function() + local new_format = box.space.customers:format() + table.insert(new_format, 
{name = 'extra', type = 'string', is_nullable = true}) + box.space.customers:format(new_format) + end) + + rawset(_G, 'add_value_index', function() + box.space.customers:create_index('value_index', { + parts = { {field = 'value'} }, + if_not_exists = true, + unique = false, + }) + end) + + rawset(_G, 'create_number_value_index', function() + box.space.customers:create_index('number_value_index', { + parts = { {field = 'number'}, {field = 'value'} }, + if_not_exists = true, + unique = false, + }) + end) + + rawset(_G, 'alter_number_value_index', function() + box.space.customers.index.number_value_index:alter({ + parts = { {field = 'value'}, {field = 'number'} }, + }) + end) +end diff --git a/test/entrypoint/srv_vshard_custom.lua b/test/entrypoint/srv_vshard_custom/cartridge_init.lua similarity index 100% rename from test/entrypoint/srv_vshard_custom.lua rename to test/entrypoint/srv_vshard_custom/cartridge_init.lua diff --git a/test/helper.lua b/test/helper.lua index 8109fd66..94a8daed 100644 --- a/test/helper.lua +++ b/test/helper.lua @@ -1,30 +1,49 @@ require('strict').on() local t = require('luatest') +local vtest = require('test.vshard_helpers.vtest') + +local luatest_capture = require('luatest.capture') +local luatest_helpers = require('luatest.helpers') local luatest_utils = require('luatest.utils') -local log = require('log') local checks = require('checks') local digest = require('digest') local fio = require('fio') local crud = require('crud') local crud_utils = require('crud.common.utils') +local cartridge_installed, cartridge_helpers = pcall(require, 'cartridge.test-helpers') if os.getenv('TARANTOOL_CRUD_ENABLE_INTERNAL_CHECKS') == nil then os.setenv('TARANTOOL_CRUD_ENABLE_INTERNAL_CHECKS', 'ON') end -local helpers = {} +local helpers = { + backend = { + VSHARD = 'vshard', + CARTRIDGE = 'cartridge', + }, +} -local ok, cartridge_helpers = pcall(require, 'cartridge.test-helpers') -if not ok then - log.error('Please, install cartridge rock to run tests') - 
os.exit(1) +local function is_cartridge_supported() + local tarantool_version = luatest_utils.get_tarantool_version() + local unsupported_version = luatest_utils.version(3, 0, 0) + return not luatest_utils.version_ge(tarantool_version, unsupported_version) end -for name, value in pairs(cartridge_helpers) do - helpers[name] = value +local function is_cartridge_installed() + return cartridge_installed +end + +if is_cartridge_supported() and is_cartridge_installed() then + for name, value in pairs(cartridge_helpers) do + helpers[name] = value + end +else + for name, value in pairs(luatest_helpers) do + helpers[name] = value + end end helpers.project_root = fio.dirname(debug.sourcedir()) @@ -43,18 +62,42 @@ fio.tempdir = function(base) end end -function helpers.entrypoint(name) +function helpers.entrypoint_cartridge(name) local path = fio.pathjoin( helpers.project_root, - 'test', 'entrypoint', - string.format('%s.lua', name) + 'test', 'entrypoint', name, 'cartridge_init.lua' ) if not fio.path.exists(path) then - error(path .. ': no such entrypoint', 2) + error(path .. ': no such cartridge entrypoint', 2) end return path end +local function entrypoint_vshard(name, entrypoint, err) + local path = fio.pathjoin( + 'test', 'entrypoint', name, entrypoint + ) + if not fio.path.exists(path .. ".lua") then + if err then + error(path .. 
': no such entrypoint', 2) + end + return nil, false + end + return path, true +end + +function helpers.entrypoint_vshard_storage(name) + return entrypoint_vshard(name, 'storage_init', true) +end + +function helpers.entrypoint_vshard_router(name) + return entrypoint_vshard(name, 'router_init', true) +end + +function helpers.entrypoint_vshard_all(name) + return entrypoint_vshard(name, 'all_init', true) +end + function helpers.table_keys(t) checks('table') local keys = {} @@ -111,7 +154,7 @@ function helpers.get_objects_by_idxs(objects, idxs) return results end -function helpers.stop_cluster(cluster) +function helpers.stop_cartridge_cluster(cluster) assert(cluster ~= nil) cluster:stop() fio.rmtree(cluster.datadir) @@ -156,7 +199,7 @@ function helpers.reset_sequence_on_cluster(cluster, sequence_name) end end -function helpers.get_test_replicasets() +function helpers.get_test_cartridge_replicasets() return { { uuid = helpers.uuid('a'), @@ -172,7 +215,7 @@ function helpers.get_test_replicasets() roles = { 'customers-storage', 'crud-storage' }, servers = { { instance_uuid = helpers.uuid('b', 1), alias = 's1-master' }, - { instance_uuid = helpers.uuid('b', 2), alias = 's1-replica' }, + { instance_uuid = helpers.uuid('b', 10), alias = 's1-replica' }, }, }, { @@ -181,12 +224,40 @@ function helpers.get_test_replicasets() roles = { 'customers-storage', 'crud-storage' }, servers = { { instance_uuid = helpers.uuid('c', 1), alias = 's2-master' }, - { instance_uuid = helpers.uuid('c', 2), alias = 's2-replica' }, + { instance_uuid = helpers.uuid('c', 10), alias = 's2-replica' }, }, } } end +function helpers.get_test_vshard_sharding() + local sharding = { + { + replicas = { + ['s1-master'] = { + instance_uuid = helpers.uuid('b', 1), + master = true, + }, + ['s1-replica'] = { + instance_uuid = helpers.uuid('b', 10), + }, + }, + }, + { + replicas = { + ['s2-master'] = { + instance_uuid = helpers.uuid('c', 1), + master = true, + }, + ['s2-replica'] = { + instance_uuid = 
helpers.uuid('c', 10), + }, + }, + }, + } + return sharding +end + function helpers.call_on_servers(cluster, aliases, func) for _, alias in ipairs(aliases) do local server = cluster:server(alias) @@ -236,30 +307,38 @@ function helpers.call_on_storages(cluster, func, ...) -- NB: The 'servers' field contains server configs. They are -- not the same as server objects: say, there is no 'net_box' -- field here. - local alias_map = {} - for _, replicaset in ipairs(cluster.replicasets) do - -- Whether it is a storage replicaset? - local has_crud_storage_role = false - for _, role in ipairs(replicaset.roles) do - if role == 'crud-storage' then - has_crud_storage_role = true - break + if cluster.replicasets ~= nil then + local alias_map = {} + for _, replicaset in ipairs(cluster.replicasets) do + -- Whether it is a storage replicaset? + local has_crud_storage_role = false + for _, role in ipairs(replicaset.roles) do + if role == 'crud-storage' then + has_crud_storage_role = true + break + end end - end - -- If so, add servers of the replicaset into the mapping. - if has_crud_storage_role then - for _, server in ipairs(replicaset.servers) do - alias_map[server.alias] = replicaset + -- If so, add servers of the replicaset into the mapping. + if has_crud_storage_role then + for _, server in ipairs(replicaset.servers) do + alias_map[server.alias] = replicaset + end end end - end - -- Call given function for each storage node. - for _, server in ipairs(cluster.servers) do - local replicaset_alias = alias_map[server.alias] - if replicaset_alias ~= nil then - func(server, replicaset_alias, ...) + -- Call given function for each storage node. + for _, server in ipairs(cluster.servers) do + local replicaset_alias = alias_map[server.alias] + if replicaset_alias ~= nil then + func(server, replicaset_alias, ...) + end + end + else + for _, server in ipairs(cluster.servers) do + if server.vtest and server.vtest.is_storage then + func(server, server.vtest.replicaset, ...) 
+ end end end end @@ -511,6 +590,50 @@ function helpers.assert_timeout_error(value, message) error(err, 2) end +function helpers.get_command_log(router, backend, call, args) + local capture + local logfile + if backend == helpers.backend.CARTRIDGE then + capture = luatest_capture:new() + capture:enable() + elseif backend == helpers.backend.VSHARD then + local logpath = router.net_box:eval('return box.cfg.log') + logfile = fio.open(logpath, {'O_RDONLY', 'O_NONBLOCK'}) + logfile:read() + end + + local _, err = router.net_box:call(call, args) + if err ~= nil then + if backend == helpers.backend.CARTRIDGE then + capture:disable() + elseif backend == helpers.backend.VSHARD then + logfile:close() + end + return nil, err + end + + -- Sometimes we have a delay here. This hack helps to wait for the end of + -- the output. It shouldn't take much time. + router.net_box:eval([[ + require('log').error("crud fflush message") + ]]) + local captured = "" + while not string.find(captured, "crud fflush message", 1, true) do + if backend == helpers.backend.CARTRIDGE then + captured = captured .. (capture:flush().stdout or "") + elseif backend == helpers.backend.VSHARD then + captured = captured .. (logfile:read() or "") + end + end + + if backend == helpers.backend.CARTRIDGE then + capture:disable() + elseif backend == helpers.backend.VSHARD then + logfile:close() + end + return captured, nil +end + function helpers.fflush_main_server_stdout(cluster, capture) -- Sometimes we have a delay here. This hack helps to wait for the end of -- the output. It shouldn't take much time. 
@@ -548,6 +671,17 @@ function helpers.is_metrics_0_12_0_or_older() return false end +function helpers.skip_cartridge_unsupported() + t.skip_if(not is_cartridge_supported(), + "Cartridge is not supported on Tarantool 3+") + t.skip_if(not is_cartridge_installed(), + "Cartridge is not installed") +end + +function helpers.skip_not_cartridge_backend(backend) + t.skip_if(backend ~= helpers.backend.CARTRIDGE, "The test is for cartridge only") +end + function helpers.is_cartridge_hotreload_supported() return crud_utils.is_cartridge_hotreload_supported() end @@ -565,4 +699,73 @@ function helpers.skip_old_tarantool_cartridge_hotreload() "Cartridge hotreload tests stuck for vshard 0.1.22+ on Tarantool 2.2, 2.3 and 2.4") end +function helpers.start_default_cluster(g, srv_name) + local cartridge_cfg = { + datadir = fio.tempdir(), + server_command = helpers.entrypoint_cartridge(srv_name), + use_vshard = true, + replicasets = helpers.get_test_cartridge_replicasets(), + } + local vshard_cfg = { + sharding = helpers.get_test_vshard_sharding(), + bucket_count = 3000, + storage_init = entrypoint_vshard(srv_name, 'storage_init', false), + router_init = entrypoint_vshard(srv_name, 'router_init', false), + all_init = entrypoint_vshard(srv_name, 'all_init', false), + crud_init = true, + } + + helpers.start_cluster(g, cartridge_cfg, vshard_cfg) +end + +function helpers.start_cluster(g, cartridge_cfg, vshard_cfg) + if g.params.backend == helpers.backend.CARTRIDGE then + helpers.skip_cartridge_unsupported() + + local cfg = table.deepcopy(cartridge_cfg) + cfg.env = { + ['ENGINE'] = g.params.engine + } + + g.cluster = helpers.Cluster:new(cfg) + g.cluster:start() + elseif g.params.backend == helpers.backend.VSHARD then + local cfg = table.deepcopy(vshard_cfg) + cfg.engine = g.params.engine + + local global_cfg = vtest.config_new(cfg) + vtest.cluster_new(g, global_cfg) + end +end + +function helpers.stop_cluster(cluster, backend) + if backend == helpers.backend.CARTRIDGE then + 
helpers.stop_cartridge_cluster(cluster) + elseif backend == helpers.backend.VSHARD then + cluster:drop() + end +end + +function helpers.get_router(cluster, backend) + if backend == helpers.backend.CARTRIDGE then + return cluster:server('router') + elseif backend == helpers.backend.VSHARD then + return cluster.main_server + end +end + +function helpers.backend_matrix(base_matrix) + base_matrix = base_matrix or {{}} + local backends = {helpers.backend.VSHARD, helpers.backend.CARTRIDGE} + local matrix = {} + for _, backend in ipairs(backends) do + for _, base in ipairs(base_matrix) do + base = table.deepcopy(base) + base.backend = backend + table.insert(matrix, base) + end + end + return matrix +end + return helpers diff --git a/test/integration/borders_test.lua b/test/integration/borders_test.lua index 3519f4af..7d4e5384 100644 --- a/test/integration/borders_test.lua +++ b/test/integration/borders_test.lua @@ -1,33 +1,23 @@ -local fio = require('fio') - local t = require('luatest') local crud = require('crud') local helpers = require('test.helper') -local pgroup = t.group('borders', { +local pgroup = t.group('borders', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/reload_test.lua b/test/integration/cartridge_reload_test.lua similarity index 95% rename from 
test/integration/reload_test.lua rename to test/integration/cartridge_reload_test.lua index 908178d2..c426706c 100644 --- a/test/integration/reload_test.lua +++ b/test/integration/cartridge_reload_test.lua @@ -9,11 +9,13 @@ local g = t.group() local helpers = require('test.helper') g.before_all(function() + helpers.skip_cartridge_unsupported() + g.cluster = helpers.Cluster:new({ datadir = fio.tempdir(), use_vshard = true, - server_command = helpers.entrypoint('srv_reload'), - replicasets = helpers.get_test_replicasets(), + server_command = helpers.entrypoint_cartridge('srv_reload'), + replicasets = helpers.get_test_cartridge_replicasets(), }) g.cluster:start() diff --git a/test/integration/cfg_test.lua b/test/integration/cfg_test.lua index 1552b02d..7f6f57e8 100644 --- a/test/integration/cfg_test.lua +++ b/test/integration/cfg_test.lua @@ -1,27 +1,22 @@ -local fio = require('fio') - local t = require('luatest') local stats = require('crud.stats') local helpers = require('test.helper') -local group = t.group('cfg') +local group = t.group('cfg', helpers.backend_matrix()) group.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_stats'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - }) + helpers.start_default_cluster(g, 'srv_stats') - g.cluster:start() + g.router = helpers.get_router(g.cluster, g.params.backend) end) -group.after_all(function(g) helpers.stop_cluster(g.cluster) end) +group.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) group.test_defaults = function(g) - local cfg = g.cluster:server('router'):eval("return require('crud').cfg") + local cfg = g.router:eval("return require('crud').cfg") t.assert_equals(cfg, { stats = false, stats_driver = stats.get_default_driver(), @@ -33,12 +28,12 @@ group.test_defaults = function(g) end group.test_change_value = function(g) - local new_cfg = g.cluster:server('router'):eval("return 
require('crud').cfg({ stats = true })") + local new_cfg = g.router:eval("return require('crud').cfg({ stats = true })") t.assert_equals(new_cfg.stats, true) end group.test_table_is_immutable = function(g) - local router = g.cluster:server('router') + local router = g.router t.assert_error_msg_contains( 'Use crud.cfg{} instead', @@ -58,7 +53,7 @@ group.test_table_is_immutable = function(g) end group.test_package_reload_preserves_values = function(g) - local router = g.cluster:server('router') + local router = g.router -- Generate some non-default values. router:eval("return require('crud').cfg({ stats = true })") @@ -70,11 +65,12 @@ group.test_package_reload_preserves_values = function(g) end group.test_role_reload_preserves_values = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) t.skip_if(not helpers.is_cartridge_hotreload_supported(), "Cartridge roles reload is not supported") helpers.skip_old_tarantool_cartridge_hotreload() - local router = g.cluster:server('router') + local router = g.router -- Generate some non-default values. router:eval("return require('crud').cfg({ stats = true })") @@ -87,19 +83,19 @@ end group.test_gh_284_preset_stats_quantile_tolerated_error_is_preserved = function(g) -- Arrange some cfg values so test case will not depend on defaults. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats = false }}) t.assert_equals(cfg.stats, false) -- Set stats_quantile_tolerated_error. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats_quantile_tolerated_error = 1e-4 }}) t.assert_equals(cfg.stats_quantile_tolerated_error, 1e-4) -- Set another cfg parameter, assert preset stats_quantile_tolerated_error presents. 
- local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats = true }}) t.assert_equals(cfg.stats, true) @@ -109,19 +105,19 @@ end group.test_gh_284_preset_stats_quantile_age_buckets_count_is_preserved = function(g) -- Arrange some cfg values so test case will not depend on defaults. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats = false }}) t.assert_equals(cfg.stats, false) -- Set stats_age_buckets_count. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats_quantile_age_buckets_count = 3 }}) t.assert_equals(cfg.stats_quantile_age_buckets_count, 3) -- Set another cfg parameter, assert preset stats_quantile_age_buckets_count presents. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats = true }}) t.assert_equals(cfg.stats, true) @@ -131,19 +127,19 @@ end group.test_gh_284_preset_stats_quantile_max_age_time_is_preserved = function(g) -- Arrange some cfg values so test case will not depend on defaults. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats = false }}) t.assert_equals(cfg.stats, false) -- Set stats_age_buckets_count. - local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats_quantile_max_age_time = 30 }}) t.assert_equals(cfg.stats_quantile_max_age_time, 30) -- Set another cfg parameter, assert preset stats_quantile_max_age_time presents. 
- local cfg = g.cluster:server('router'):eval( + local cfg = g.router:eval( "return require('crud').cfg(...)", {{ stats = true }}) t.assert_equals(cfg.stats, true) @@ -152,6 +148,8 @@ group.test_gh_284_preset_stats_quantile_max_age_time_is_preserved = function(g) end group.test_role_cfg = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) + local cfg = { stats = true, stats_driver = 'local', @@ -163,12 +161,14 @@ group.test_role_cfg = function(g) g.cluster.main_server:upload_config({crud = cfg}) - local actual_cfg = g.cluster:server('router'):eval("return require('crud').cfg") + local actual_cfg = g.router:eval("return require('crud').cfg") t.assert_equals(cfg, actual_cfg) end group.test_role_partial_cfg = function(g) - local router = g.cluster:server('router') + helpers.skip_not_cartridge_backend(g.params.backend) + + local router = g.router local cfg_before = router:eval("return require('crud').cfg()") local cfg_after = table.deepcopy(cfg_before) @@ -176,7 +176,7 @@ group.test_role_partial_cfg = function(g) g.cluster.main_server:upload_config({crud = {stats = cfg_after.stats}}) - local actual_cfg = g.cluster:server('router'):eval("return require('crud').cfg") + local actual_cfg = g.router:eval("return require('crud').cfg") t.assert_equals(cfg_after, actual_cfg, "Only requested field were updated") end @@ -201,7 +201,8 @@ local role_cfg_error_cases = { } for name, case in pairs(role_cfg_error_cases) do - group['test_rolce_cfg_' .. name] = function(g) + group['test_role_cfg_' .. 
name] = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) local success, error = pcall(function() g.cluster.main_server:upload_config(case.args) end) diff --git a/test/integration/count_test.lua b/test/integration/count_test.lua index d61877db..d82367d6 100644 --- a/test/integration/count_test.lua +++ b/test/integration/count_test.lua @@ -1,38 +1,29 @@ -local fio = require('fio') local clock = require('clock') local t = require('luatest') -local luatest_capture = require('luatest.capture') local helpers = require('test.helper') -local pgroup = t.group('count', { +local pgroup = t.group('count', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) + helpers.start_default_cluster(g, 'srv_select') - g.cluster:start() - - g.cluster:server('router').net_box:eval([[ + g.router = helpers.get_router(g.cluster, g.params.backend) + g.router.net_box:eval([[ require('crud').cfg{ stats = true } ]]) - g.cluster:server('router').net_box:eval([[ + g.router.net_box:eval([[ require('crud.ratelimit').disable() ]]) end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') @@ -148,21 +139,15 @@ for name, case in pairs(count_safety_cases) do pgroup[test_name] = function(g) local uc = case.user_conditions local opts = case.opts - local capture = luatest_capture:new() - capture:enable() + local captured, err = helpers.get_command_log(g.router, + g.params.backend, 'crud.count', {space, uc, opts}) - local _, err = g.cluster.main_server.net_box:call('crud.count', {space, uc, opts}) t.assert_equals(err, 
nil) - - local captured = helpers.fflush_main_server_stdout(g.cluster, capture) - if case.has_crit then t.assert_str_contains(captured, crit_log) else t.assert_equals(string.find(captured, crit_log, 1, true), nil) end - - capture:disable() end end @@ -657,7 +642,7 @@ pgroup.test_count_no_map_reduce = function(g) }, }) - local router = g.cluster:server('router').net_box + local router = g.router.net_box local map_reduces_before = helpers.get_map_reduces_stat(router, 'customers') -- Case: no conditions, just bucket id. diff --git a/test/integration/custom_bucket_id_test.lua b/test/integration/custom_bucket_id_test.lua index ae315f1c..b5ac4091 100644 --- a/test/integration/custom_bucket_id_test.lua +++ b/test/integration/custom_bucket_id_test.lua @@ -1,32 +1,23 @@ -local fio = require('fio') - local t = require('luatest') + local crud_utils = require('crud.common.utils') local helpers = require('test.helper') -local pgroup = t.group('custom_bucket_id', { +local pgroup = t.group('custom_bucket_id', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_simple_operations'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_simple_operations') g.space_format = g.cluster.servers[2].net_box.space.customers:format() end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/ddl_sharding_func_test.lua b/test/integration/ddl_sharding_func_test.lua index b43e7fa6..e8d49d66 100644 --- a/test/integration/ddl_sharding_func_test.lua +++ b/test/integration/ddl_sharding_func_test.lua 
@@ -1,4 +1,3 @@ -local fio = require('fio') local crud = require('crud') local t = require('luatest') @@ -9,34 +8,26 @@ if not ok then t.skip('Lua module ddl is required to run test') end -local pgroup = t.group('ddl_sharding_func', { +local pgroup = t.group('ddl_sharding_func', helpers.backend_matrix({ {engine = 'memtx', space_name = 'customers_G_func'}, {engine = 'memtx', space_name = 'customers_body_func'}, {engine = 'vinyl', space_name = 'customers_G_func'}, {engine = 'vinyl', space_name = 'customers_body_func'}, -}) +})) -local cache_group = t.group('ddl_sharding_func_cache', { +local cache_group = t.group('ddl_sharding_func_cache', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) -local vshard_group = t.group('ddl_vshard_sharding_func', { +local vshard_group = t.group('ddl_vshard_sharding_func', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) - -pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_ddl'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() +})) + +local function before_all(g) + helpers.start_default_cluster(g, 'srv_ddl') + local result, err = g.cluster.main_server.net_box:eval([[ local ddl = require('ddl') @@ -45,65 +36,30 @@ pgroup.before_all(function(g) ]]) t.assert_equals(type(result), 'table') t.assert_equals(err, nil) -end) +end + +local function after_all(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.before_all(before_all) +pgroup.after_all(after_all) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers_G_func') helpers.truncate_space_on_cluster(g.cluster, 'customers_body_func') end) -cache_group.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = 
helpers.entrypoint('srv_ddl'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() - local result, err = g.cluster.main_server.net_box:eval([[ - local ddl = require('ddl') - - local ok, err = ddl.get_schema() - return ok, err - ]]) - t.assert_equals(type(result), 'table') - t.assert_equals(err, nil) -end) - -cache_group.after_all(function(g) helpers.stop_cluster(g.cluster) end) +cache_group.before_all(before_all) +cache_group.after_all(after_all) cache_group.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers_G_func') helpers.truncate_space_on_cluster(g.cluster, 'customers_body_func') end) -vshard_group.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_ddl'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() - local result, err = g.cluster.main_server.net_box:eval([[ - local ddl = require('ddl') - - local ok, err = ddl.get_schema() - return ok, err - ]]) - t.assert_equals(type(result), 'table') - t.assert_equals(err, nil) -end) - -vshard_group.after_all(function(g) helpers.stop_cluster(g.cluster) end) +vshard_group.before_all(before_all) +vshard_group.after_all(after_all) vshard_group.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers_vshard_mpcrc32') diff --git a/test/integration/ddl_sharding_info_reload_test.lua b/test/integration/ddl_sharding_info_reload_test.lua index b78bafc8..823cc590 100644 --- a/test/integration/ddl_sharding_info_reload_test.lua +++ b/test/integration/ddl_sharding_info_reload_test.lua @@ -1,4 +1,3 @@ -local fio = require('fio') local t = require('luatest') local sharding_utils = require('crud.common.sharding.utils') @@ -10,43 +9,34 @@ if not ok then t.skip('Lua module ddl is required to run test') end -local pgroup_storage = 
t.group('ddl_storage_sharding_info', { +local pgroup_storage = t.group('ddl_storage_sharding_info', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) -local pgroup_new_space = t.group('ddl_sharding_info_on_new_space', { +local pgroup_new_space = t.group('ddl_sharding_info_on_new_space', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) -local pgroup_key_change = t.group('ddl_sharding_key_reload_after_schema_change', { +local pgroup_key_change = t.group('ddl_sharding_key_reload_after_schema_change', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) -local pgroup_func_change = t.group('ddl_sharding_func_reload_after_schema_change', { +local pgroup_func_change = t.group('ddl_sharding_func_reload_after_schema_change', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) local select_limit = 100 local function start_cluster(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_ddl_reload'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() + helpers.start_default_cluster(g, 'srv_ddl_reload') end local function stop_cluster(g) - helpers.stop_cluster(g.cluster) + helpers.stop_cluster(g.cluster, g.params.backend) end pgroup_storage.before_all(start_cluster) @@ -229,6 +219,8 @@ pgroup_storage.test_gh_310_ddl_key_record_delete_removes_cache_entry = function( -- Drop space together with sharding info. local _, err = storage:eval([[ + local ddl = require('ddl') + local space_name = ... local current_schema, err = ddl.get_schema() @@ -263,6 +255,8 @@ pgroup_storage.test_gh_310_ddl_func_record_delete_removes_cache_entry = function -- Drop space together with sharding info. local _, err = storage:eval([[ + local ddl = require('ddl') + local space_name = ... 
local current_schema, err = ddl.get_schema() @@ -303,6 +297,7 @@ for sharding_case_name, sharding_case in pairs(sharding_cases) do reload_case_name, sharding_case_name) pgroup_storage[test_name] = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) t.skip_if( ((reload_case == 'reload_roles') and not helpers.is_cartridge_hotreload_supported()), @@ -334,6 +329,7 @@ for _, sharding_case in pairs(sharding_cases) do sharding_case.ddl_space, reload_case_name) pgroup_storage[test_name] = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) t.skip_if( ((reload_case == 'reload_roles') and not helpers.is_cartridge_hotreload_supported()), diff --git a/test/integration/ddl_sharding_key_test.lua b/test/integration/ddl_sharding_key_test.lua index ab8daaba..b12833a9 100644 --- a/test/integration/ddl_sharding_key_test.lua +++ b/test/integration/ddl_sharding_key_test.lua @@ -1,5 +1,5 @@ -local fio = require('fio') local crud = require('crud') + local t = require('luatest') local helpers = require('test.helper') @@ -9,22 +9,14 @@ if not ok then t.skip('Lua module ddl is required to run test') end -local pgroup = t.group('ddl_sharding_key', { +local pgroup = t.group('ddl_sharding_key', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_ddl'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() + helpers.start_default_cluster(g, 'srv_ddl') + local result, err = g.cluster.main_server.net_box:eval([[ local ddl = require('ddl') @@ -34,12 +26,15 @@ pgroup.before_all(function(g) t.assert_equals(type(result), 'table') t.assert_equals(err, nil) - g.cluster.main_server.net_box:eval([[ + g.router = helpers.get_router(g.cluster, g.params.backend) + g.router.net_box:eval([[ require('crud').cfg{ stats = true } ]]) end) 
-pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') @@ -562,7 +557,7 @@ for name, case in pairs(cases) do pgroup[('test_%s_wont_lead_to_map_reduce'):format(name)] = function(g) case.prepare_data(g, case.space_name) - local router = g.cluster:server('router').net_box + local router = g.router.net_box local map_reduces_before = helpers.get_map_reduces_stat(router, case.space_name) local result, err = router:call('crud.select', { @@ -582,7 +577,7 @@ pgroup.test_select_for_part_of_sharding_key_will_lead_to_map_reduce = function(g local space_name = 'customers_name_age_key_different_indexes' prepare_data_name_age_sharding_key(g, space_name) - local router = g.cluster:server('router').net_box + local router = g.router.net_box local map_reduces_before = helpers.get_map_reduces_stat(router, space_name) local result, err = router:call('crud.select', { diff --git a/test/integration/insert_many_test.lua b/test/integration/insert_many_test.lua index aa947855..1e62ebdb 100644 --- a/test/integration/insert_many_test.lua +++ b/test/integration/insert_many_test.lua @@ -1,5 +1,3 @@ -local fio = require('fio') - local t = require('luatest') local crud = require('crud') @@ -7,26 +5,18 @@ local helpers = require('test.helper') local batching_utils = require('crud.common.batching_utils') -local pgroup = t.group('insert_many', { +local pgroup = t.group('insert_many', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_batch_operations'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_batch_operations') end) 
-pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/len_test.lua b/test/integration/len_test.lua index bd45bc14..a28fd397 100644 --- a/test/integration/len_test.lua +++ b/test/integration/len_test.lua @@ -1,28 +1,18 @@ -local fio = require('fio') - local t = require('luatest') local helpers = require('test.helper') -local pgroup = t.group('len', { +local pgroup = t.group('len', helpers.backend_matrix({ {engine = 'memtx'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/migration_test.lua b/test/integration/migration_test.lua index eba14745..6d6d6b29 100644 --- a/test/integration/migration_test.lua +++ b/test/integration/migration_test.lua @@ -1,28 +1,20 @@ -local fio = require('fio') - local t = require('luatest') local helpers = require('test.helper') +-- The migrations package requires cartridge as a dependency. 
local pgroup = t.group('migration', { - {engine = 'memtx'}, - {engine = 'vinyl'}, + {backend = helpers.backend.CARTRIDGE, engine = 'memtx'}, + {backend = helpers.backend.CARTRIDGE, engine = 'vinyl'}, }) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_migration'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() + helpers.start_default_cluster(g, 'srv_migration') end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.test_gh_308_select_after_improper_ddl_space_drop = function(g) -- Create a space sharded by key with ddl tools. diff --git a/test/integration/pairs_readview_test.lua b/test/integration/pairs_readview_test.lua index baadaf6a..f3bb7085 100644 --- a/test/integration/pairs_readview_test.lua +++ b/test/integration/pairs_readview_test.lua @@ -1,33 +1,21 @@ -local fio = require('fio') - local t = require('luatest') local crud_utils = require('crud.common.utils') local helpers = require('test.helper') -local pgroup = t.group('pairs_readview', { +local pgroup = t.group('pairs_readview', helpers.backend_matrix({ {engine = 'memtx'}, -}) +})) pgroup.before_all(function(g) if (not helpers.tarantool_version_at_least(2, 11, 0)) or (not require('luatest.tarantool').is_enterprise_package()) then t.skip('Readview is supported only for Tarantool Enterprise starting from v2.11.0') - end - - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) + end - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() @@ -36,7 +24,9 @@ 
pgroup.before_all(function(g) ]]) end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') @@ -839,7 +829,7 @@ pgroup.test_pairs_no_map_reduce = function(g) table.sort(customers, function(obj1, obj2) return obj1.id < obj2.id end) - local router = g.cluster:server('router').net_box + local router = helpers.get_router(g.cluster, g.params.backend).net_box local map_reduces_before = helpers.get_map_reduces_stat(router, 'customers') -- Case: no conditions, just bucket id. diff --git a/test/integration/pairs_test.lua b/test/integration/pairs_test.lua index a0c90618..8585f091 100644 --- a/test/integration/pairs_test.lua +++ b/test/integration/pairs_test.lua @@ -1,28 +1,16 @@ -local fio = require('fio') - local t = require('luatest') local crud_utils = require('crud.common.utils') local helpers = require('test.helper') -local pgroup = t.group('pairs', { +local pgroup = t.group('pairs', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() @@ -31,7 +19,9 @@ pgroup.before_all(function(g) ]]) end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') @@ -838,7 +828,7 @@ pgroup.test_pairs_no_map_reduce = function(g) table.sort(customers, function(obj1, obj2) return obj1.id < 
obj2.id end) - local router = g.cluster:server('router').net_box + local router = helpers.get_router(g.cluster, g.params.backend).net_box local map_reduces_before = helpers.get_map_reduces_stat(router, 'customers') -- Case: no conditions, just bucket id. diff --git a/test/integration/read_calls_strategies_test.lua b/test/integration/read_calls_strategies_test.lua index ed96e282..eec8606f 100644 --- a/test/integration/read_calls_strategies_test.lua +++ b/test/integration/read_calls_strategies_test.lua @@ -1,8 +1,8 @@ -local fio = require('fio') - local t = require('luatest') -local pgroup = t.group('read_calls_strategies', { +local helpers = require('test.helper') + +local pgroup = t.group('read_calls_strategies', helpers.backend_matrix({ -- mode: write {exp_vshard_call = 'callrw', mode = 'write'}, @@ -24,19 +24,10 @@ local pgroup = t.group('read_calls_strategies', { -- prefer_replica, balance -> callbre {exp_vshard_call = 'callbre', prefer_replica = true, balance = true}, -}) - -local helpers = require('test.helper') +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_read_calls_strategies'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_read_calls_strategies') g.space_format = g.cluster.servers[2].net_box.space.customers:format() @@ -67,7 +58,7 @@ pgroup.before_all(function(g) end) pgroup.after_all(function(g) - helpers.stop_cluster(g.cluster) + helpers.stop_cluster(g.cluster, g.params.backend) end) pgroup.before_each(function(g) diff --git a/test/integration/readview_not_supported_test.lua b/test/integration/readview_not_supported_test.lua index a7ddab3d..fdada756 100644 --- a/test/integration/readview_not_supported_test.lua +++ b/test/integration/readview_not_supported_test.lua @@ -1,12 +1,10 @@ -local fio = require('fio') - local t = require('luatest') local helpers = 
require('test.helper') -local pgroup = t.group('readview_not_supported', { +local pgroup = t.group('readview_not_supported', helpers.backend_matrix({ {engine = 'memtx'}, -}) +})) pgroup.before_all(function(g) @@ -14,22 +12,15 @@ pgroup.before_all(function(g) and require('luatest.tarantool').is_enterprise_package() then t.skip("Readview is supported") end - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.test_open = function(g) local obj, err = g.cluster.main_server.net_box:eval([[ diff --git a/test/integration/replace_many_test.lua b/test/integration/replace_many_test.lua index adcdd3e3..3968841e 100644 --- a/test/integration/replace_many_test.lua +++ b/test/integration/replace_many_test.lua @@ -1,32 +1,23 @@ -local fio = require('fio') - local t = require('luatest') + local crud = require('crud') local helpers = require('test.helper') local batching_utils = require('crud.common.batching_utils') -local pgroup = t.group('replace_many', { +local pgroup = t.group('replace_many', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_batch_operations'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_batch_operations') end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) 
+pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'developers') diff --git a/test/integration/select_readview_test.lua b/test/integration/select_readview_test.lua index bfa8c04a..34473409 100644 --- a/test/integration/select_readview_test.lua +++ b/test/integration/select_readview_test.lua @@ -1,4 +1,3 @@ -local fio = require('fio') local fiber = require('fiber') local t = require('luatest') @@ -9,39 +8,35 @@ local crud_utils = require('crud.common.utils') local helpers = require('test.helper') -local pgroup = t.group('select_readview', { +local pgroup = t.group('select_readview', helpers.backend_matrix({ {engine = 'memtx'}, -}) +})) - -pgroup.before_all(function(g) - if (not helpers.tarantool_version_at_least(2, 11, 0)) - or (not require('luatest.tarantool').is_enterprise_package()) then - t.skip('Readview is supported only for Tarantool Enterprise starting from v2.11.0') - end - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() +local function init_cluster(g) + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() - g.cluster:server('router').net_box:eval([[ + g.router = helpers.get_router(g.cluster, g.params.backend) + g.router.net_box:eval([[ require('crud').cfg{ stats = true } ]]) - g.cluster:server('router').net_box:eval([[ + g.router.net_box:eval([[ require('crud.ratelimit').disable() ]]) +end + +pgroup.before_all(function(g) + if (not helpers.tarantool_version_at_least(2, 11, 0)) + or (not require('luatest.tarantool').is_enterprise_package()) then + t.skip('Readview is supported only for Tarantool Enterprise starting from v2.11.0') + end + init_cluster(g) end) 
-pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') @@ -2211,7 +2206,7 @@ pgroup.test_select_no_map_reduce = function(g) table.sort(customers, function(obj1, obj2) return obj1.id < obj2.id end) - local router = g.cluster:server('router').net_box + local router = g.router.net_box local map_reduces_before = helpers.get_map_reduces_stat(router, 'customers') -- Case: no conditions, just bucket id. @@ -2309,9 +2304,15 @@ pgroup.test_stop_select = function(g) local result, err = foo:select('customers', nil, {fullscan = true}) return result, err ]]) - t.assert_str_contains(err.err, 'Connection refused') + t.assert_error(err.err) g.cluster:server('s2-master'):start() + if g.params.backend == helpers.backend.VSHARD then + g.cluster:server('s2-master'):exec(function() + require('crud').init_storage() + end) + end + local _, err = g.cluster.main_server.net_box:eval([[ local crud = require('crud') local foo = rawget(_G, 'foo', foo) @@ -2321,7 +2322,19 @@ pgroup.test_stop_select = function(g) t.assert_equals(err, nil) end +pgroup.after_test('test_stop_select', function(g) + -- It seems more easy to restart the cluster rather then restore it + -- original state. 
+ if g.params.backend == helpers.backend.VSHARD then + helpers.stop_cluster(g.cluster, g.params.backend) + g.cluster = nil + init_cluster(g) + end +end) + pgroup.test_select_switch_master = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) + helpers.insert_objects(g, 'customers', { { id = 1, name = "Elizabeth", last_name = "Jackson", @@ -2349,7 +2362,7 @@ pgroup.test_select_switch_master = function(g) ]]) t.assert_equals(err, nil) - local replicasets = helpers.get_test_replicasets() + local replicasets = helpers.get_test_cartridge_replicasets() set_master(g.cluster, replicasets[2].uuid, replicasets[2].servers[2].instance_uuid) local obj, err = g.cluster.main_server.net_box:eval([[ @@ -2371,7 +2384,10 @@ pgroup.test_select_switch_master = function(g) end +-- TODO: https://github.com/tarantool/crud/issues/383 pgroup.test_select_switch_master_first = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) + local customers = helpers.insert_objects(g, 'customers', { { id = 1, name = "Elizabeth", last_name = "Jackson", @@ -2402,7 +2418,7 @@ pgroup.test_select_switch_master_first = function(g) local objects = crud.unflatten_rows(obj.rows, obj.metadata) t.assert_equals(objects, helpers.get_objects_by_idxs(customers, {1, 2})) - local replicasets = helpers.get_test_replicasets() + local replicasets = helpers.get_test_cartridge_replicasets() set_master(g.cluster, replicasets[3].uuid, replicasets[3].servers[2].instance_uuid) local obj, err = g.cluster.main_server.net_box:eval([[ @@ -2424,6 +2440,7 @@ pgroup.test_select_switch_master_first = function(g) end +-- TODO: https://github.com/tarantool/crud/issues/383 pgroup.test_select_closed_readview = function(g) helpers.insert_objects(g, 'customers', { { diff --git a/test/integration/select_test.lua b/test/integration/select_test.lua index 6588aa3b..09900b3b 100644 --- a/test/integration/select_test.lua +++ b/test/integration/select_test.lua @@ -1,42 +1,32 @@ -local fio = require('fio') - local t = 
require('luatest') -local luatest_capture = require('luatest.capture') local crud = require('crud') local crud_utils = require('crud.common.utils') local helpers = require('test.helper') -local pgroup = t.group('select', { +local pgroup = t.group('select', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() - g.cluster:server('router').net_box:eval([[ + g.router = helpers.get_router(g.cluster, g.params.backend) + g.router.net_box:eval([[ require('crud').cfg{ stats = true } ]]) - g.cluster:server('router').net_box:eval([[ + g.router.net_box:eval([[ require('crud.ratelimit').disable() ]]) end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') @@ -160,8 +150,6 @@ for name, case in pairs(select_safety_cases) do pgroup[test_name] = function(g) local uc = case.user_conditions local opts = case.opts - local capture = luatest_capture:new() - capture:enable() if opts ~= nil and opts.first ~= nil and opts.first < 0 then local after_tuple = { @@ -171,18 +159,15 @@ for name, case in pairs(select_safety_cases) do opts.after = crud_utils.flatten(after_tuple, g.space_format) end - local _, err = g.cluster.main_server.net_box:call('crud.select', {space, uc, opts}) + local captured, err = helpers.get_command_log(g.router, + g.params.backend, 'crud.select', {space, uc, opts}) t.assert_equals(err, nil) - local captured = helpers.fflush_main_server_stdout(g.cluster, 
capture) - if case.has_crit then t.assert_str_contains(captured, crit_log) else t.assert_equals(string.find(captured, crit_log, 1, true), nil) end - - capture:disable() end end @@ -1889,7 +1874,7 @@ pgroup.test_select_no_map_reduce = function(g) table.sort(customers, function(obj1, obj2) return obj1.id < obj2.id end) - local router = g.cluster:server('router').net_box + local router = g.router.net_box local map_reduces_before = helpers.get_map_reduces_stat(router, 'customers') -- Case: no conditions, just bucket id. diff --git a/test/integration/simple_operations_test.lua b/test/integration/simple_operations_test.lua index 66a3720b..ccdca662 100644 --- a/test/integration/simple_operations_test.lua +++ b/test/integration/simple_operations_test.lua @@ -1,30 +1,20 @@ -local fio = require('fio') - local t = require('luatest') local crud = require('crud') local helpers = require('test.helper') -local pgroup = t.group('simple_operations', { +local pgroup = t.group('simple_operations', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_simple_operations'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_simple_operations') end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/stats_test.lua b/test/integration/stats_test.lua index 812ff2d3..c54ff47c 100644 --- a/test/integration/stats_test.lua +++ b/test/integration/stats_test.lua @@ -1,38 +1,41 @@ -local fio = require('fio') local clock = require('clock') +local helpers = require('test.helper') local t = 
require('luatest') local stats_registry_utils = require('crud.stats.registry_utils') -local pgroup = t.group('stats_integration', { +local matrix = helpers.backend_matrix({ { way = 'call', args = { driver = 'local' }}, { way = 'call', args = { driver = 'metrics', quantiles = false }}, { way = 'call', args = { driver = 'metrics', quantiles = true }}, - { way = 'role', args = { driver = 'local' }}, - { way = 'role', args = { driver = 'metrics', quantiles = false }}, - { way = 'role', args = { driver = 'metrics', quantiles = true }}, }) -local group_metrics = t.group('stats_metrics_integration', { - { way = 'call', args = { driver = 'metrics', quantiles = false }}, - { way = 'role', args = { driver = 'metrics', quantiles = true }}, +table.insert(matrix, {backend = helpers.backend.CARTRIDGE, + way = 'role', args = { driver = 'local' }, +}) +table.insert(matrix, {backend = helpers.backend.CARTRIDGE, + way = 'role', args = { driver = 'metrics', quantiles = false }, }) +table.insert(matrix, {backend = helpers.backend.CARTRIDGE, + way = 'role', args = { driver = 'metrics', quantiles = true }, +}) +local pgroup = t.group('stats_integration', matrix) -local helpers = require('test.helper') +matrix = helpers.backend_matrix({ + { way = 'call', args = { driver = 'metrics', quantiles = false }}, +}) +table.insert(matrix, {backend = helpers.backend.CARTRIDGE, + way = 'role', args = { driver = 'metrics', quantiles = true }, +}) +local group_metrics = t.group('stats_metrics_integration', matrix) local space_name = 'customers' local non_existing_space_name = 'non_existing_space' local new_space_name = 'newspace' local function before_all(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_stats'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - }) - g.cluster:start() - g.router = g.cluster:server('router').net_box + helpers.start_default_cluster(g, 'srv_stats') + g.router = helpers.get_router(g.cluster, 
g.params.backend).net_box if g.params.args.driver == 'metrics' then local is_metrics_supported = g.router:eval([[ return require('crud.stats.metrics_registry').is_supported() @@ -42,7 +45,7 @@ local function before_all(g) end local function after_all(g) - helpers.stop_cluster(g.cluster) + helpers.stop_cluster(g.cluster, g.params.backend) end local function get_stats(g, space_name) @@ -855,11 +858,13 @@ pgroup.before_test( generate_stats) pgroup.test_role_reload_do_not_reset_observations = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) t.skip_if(not helpers.is_cartridge_hotreload_supported(), "Cartridge roles reload is not supported") t.skip_if((g.params.args.driver == 'metrics') and helpers.is_metrics_0_12_0_or_older(), "See https://github.com/tarantool/metrics/issues/334") + helpers.skip_old_tarantool_cartridge_hotreload() local stats_before = get_stats(g) @@ -876,6 +881,8 @@ pgroup.before_test( generate_stats) pgroup.test_module_reload_do_not_reset_observations = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) + local stats_before = get_stats(g) helpers.reload_package(g.cluster:server('router')) @@ -1171,6 +1178,7 @@ group_metrics.before_test( generate_stats) group_metrics.test_role_reload_do_not_reset_metrics_observations = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) t.skip_if(not helpers.is_cartridge_hotreload_supported(), "Cartridge roles reload is not supported") t.skip_if(helpers.is_metrics_0_12_0_or_older(), @@ -1213,6 +1221,7 @@ group_metrics.before_test( prepare_select_data) group_metrics.test_stats_changed_in_metrics_registry_after_role_reload = function(g) + helpers.skip_not_cartridge_backend(g.params.backend) t.skip_if(not helpers.is_cartridge_hotreload_supported(), "Cartridge roles reload is not supported") helpers.skip_old_tarantool_cartridge_hotreload() diff --git a/test/integration/storages_state_test.lua b/test/integration/storages_state_test.lua index 015fd664..172baabc 100644 --- 
a/test/integration/storages_state_test.lua +++ b/test/integration/storages_state_test.lua @@ -1,14 +1,12 @@ -local fio = require('fio') - local t = require('luatest') local helpers = require('test.helper') local fiber = require("fiber") -local pgroup = t.group('storage_info', { +local pgroup = t.group('storage_info', helpers.backend_matrix({ {engine = 'memtx'} -}) +})) -- Waits for storages to initialize. -- This is a workaround for "peer closed" errors for some connections right after the cluster start. @@ -40,16 +38,7 @@ local function wait_storages_init(g) end pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') end) pgroup.before_each(function(g) @@ -57,8 +46,7 @@ pgroup.before_each(function(g) end) pgroup.after_all(function(g) - helpers.stop_cluster(g.cluster) - fio.rmtree(g.cluster.datadir) + helpers.stop_cluster(g.cluster, g.params.backend) end) pgroup.test_crud_storage_status_of_stopped_servers = function(g) @@ -71,7 +59,7 @@ pgroup.test_crud_storage_status_of_stopped_servers = function(g) status = "running", is_master = true }, - [helpers.uuid('b', 2)] = { + [helpers.uuid('b', 10)] = { status = "running", is_master = false }, @@ -79,7 +67,7 @@ pgroup.test_crud_storage_status_of_stopped_servers = function(g) status = "running", is_master = true }, - [helpers.uuid('c', 2)] = { + [helpers.uuid('c', 10)] = { status = "error", is_master = false, message = "Peer closed" @@ -89,9 +77,14 @@ end pgroup.after_test('test_crud_storage_status_of_stopped_servers', function(g) g.cluster:server("s2-replica"):start() + g.cluster:server("s2-replica"):exec(function() + require('crud').init_storage() + end) end) pgroup.test_disabled_storage_role = function(g) + 
helpers.skip_not_cartridge_backend(g.params.backend) + -- stop crud storage role on one replica local server = g.cluster:server("s1-replica") local results = server.net_box:eval([[ @@ -110,7 +103,7 @@ pgroup.test_disabled_storage_role = function(g) status = "running", is_master = true }, - [helpers.uuid('b', 2)] = { + [helpers.uuid('b', 10)] = { status = "uninitialized", is_master = false }, @@ -118,7 +111,7 @@ pgroup.test_disabled_storage_role = function(g) status = "running", is_master = true }, - [helpers.uuid('c', 2)] = { + [helpers.uuid('c', 10)] = { status = "running", is_master = false } @@ -152,7 +145,7 @@ pgroup.test_storage_call_failure = function(g) status = "running", is_master = true }, - [helpers.uuid('b', 2)] = { + [helpers.uuid('b', 10)] = { status = "running", is_master = false }, @@ -160,7 +153,7 @@ pgroup.test_storage_call_failure = function(g) status = "running", is_master = true }, - [helpers.uuid('c', 2)] = { + [helpers.uuid('c', 10)] = { status = "error", is_master = false, message = "attempt to call a table value" diff --git a/test/integration/truncate_test.lua b/test/integration/truncate_test.lua index 9cfb306c..b766dd96 100644 --- a/test/integration/truncate_test.lua +++ b/test/integration/truncate_test.lua @@ -1,31 +1,21 @@ -local fio = require('fio') - local t = require('luatest') local helpers = require('test.helper') -local pgroup = t.group('truncate', { +local pgroup = t.group('truncate', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_select'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_select') g.space_format = g.cluster.servers[2].net_box.space.customers:format() end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) 
+pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git "a/test/integration/updated_s\321\201hema_test.lua" b/test/integration/updated_schema_test.lua similarity index 98% rename from "test/integration/updated_s\321\201hema_test.lua" rename to test/integration/updated_schema_test.lua index 6803efd1..2077a0ec 100644 --- "a/test/integration/updated_s\321\201hema_test.lua" +++ b/test/integration/updated_schema_test.lua @@ -1,5 +1,3 @@ -local fio = require('fio') - local t = require('luatest') local helpers = require('test.helper') @@ -8,26 +6,18 @@ local fiber = require('fiber') local crud = require('crud') local crud_utils = require('crud.common.utils') -local pgroup = t.group('updated_schema', { +local pgroup = t.group('updated_schema', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_update_schema'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_update_schema') end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.drop_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/upsert_many_test.lua b/test/integration/upsert_many_test.lua index 76749476..7a0afd1e 100644 --- a/test/integration/upsert_many_test.lua +++ b/test/integration/upsert_many_test.lua @@ -1,31 +1,21 @@ -local fio = require('fio') - local t = require('luatest') local helpers = require('test.helper') local batching_utils = require('crud.common.batching_utils') -local pgroup = t.group('upsert_many', { +local pgroup = 
t.group('upsert_many', helpers.backend_matrix({ {engine = 'memtx'}, {engine = 'vinyl'}, -}) +})) pgroup.before_all(function(g) - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_batch_operations'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - env = { - ['ENGINE'] = g.params.engine, - }, - }) - - g.cluster:start() + helpers.start_default_cluster(g, 'srv_batch_operations') end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') diff --git a/test/integration/vshard_custom_test.lua b/test/integration/vshard_custom_test.lua index c5801b7a..e8b02672 100644 --- a/test/integration/vshard_custom_test.lua +++ b/test/integration/vshard_custom_test.lua @@ -12,9 +12,11 @@ local pgroup = t.group('vshard_custom', { }) pgroup.before_all(function(g) + helpers.skip_cartridge_unsupported() + g.cluster = helpers.Cluster:new({ datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_vshard_custom'), + server_command = helpers.entrypoint_cartridge('srv_vshard_custom'), use_vshard = true, replicasets = { { @@ -86,7 +88,7 @@ pgroup.before_all(function(g) g.router = g.cluster:server('router').net_box end) -pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) +pgroup.after_all(function(g) helpers.stop_cartridge_cluster(g.cluster) end) pgroup.before_all(function(g) g.router:eval([[ diff --git a/test/performance/perf_test.lua b/test/performance/perf_test.lua index 4401ba06..aa01647f 100644 --- a/test/performance/perf_test.lua +++ b/test/performance/perf_test.lua @@ -7,10 +7,10 @@ local log = require('log') local fun = require('fun') local t = require('luatest') -local g = t.group('perf') local helpers = require('test.helper') +local g = t.group('perf', helpers.backend_matrix()) local id = 0 local 
function gen() @@ -22,6 +22,81 @@ local function reset_gen() id = 0 end +local vshard_cfg_template = { + sharding = { + { + replicas = { + ['s1-master'] = { + master = true, + }, + ['s1-replica'] = {}, + }, + }, + { + replicas = { + ['s2-master'] = { + master = true, + }, + ['s2-replica'] = {}, + }, + }, + { + replicas = { + ['s3-master'] = { + master = true, + }, + ['s3-replica'] = {}, + }, + }, + }, + bucket_count = 3000, + storage_init = helpers.entrypoint_vshard_storage('srv_ddl'), + crud_init = true, +} + +local cartridge_cfg_template = { + datadir = fio.tempdir(), + server_command = helpers.entrypoint_cartridge('srv_ddl'), + use_vshard = true, + replicasets = { + { + uuid = helpers.uuid('a'), + alias = 'router', + roles = { 'crud-router' }, + servers = { + { instance_uuid = helpers.uuid('a', 1), alias = 'router' }, + }, + }, + { + uuid = helpers.uuid('b'), + alias = 's-1', + roles = { 'customers-storage', 'crud-storage' }, + servers = { + { instance_uuid = helpers.uuid('b', 1), alias = 's1-master' }, + { instance_uuid = helpers.uuid('b', 2), alias = 's1-replica' }, + }, + }, + { + uuid = helpers.uuid('c'), + alias = 's-2', + roles = { 'customers-storage', 'crud-storage' }, + servers = { + { instance_uuid = helpers.uuid('c', 1), alias = 's2-master' }, + { instance_uuid = helpers.uuid('c', 2), alias = 's2-replica' }, + }, + }, + { + uuid = helpers.uuid('d'), + alias = 's-3', + roles = { 'customers-storage', 'crud-storage' }, + servers = { + { instance_uuid = helpers.uuid('d', 1), alias = 's3-master' }, + { instance_uuid = helpers.uuid('d', 2), alias = 's3-replica' }, + }, + } + }, +} + g.before_all(function(g) -- Run real perf tests only with flag, otherwise run short version -- to test compatibility as part of unit/integration test run. 
@@ -32,51 +107,9 @@ g.before_all(function(g) helpers.disable_dev_checks() end - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_ddl'), - use_vshard = true, - replicasets = { - { - uuid = helpers.uuid('a'), - alias = 'router', - roles = { 'crud-router' }, - servers = { - { instance_uuid = helpers.uuid('a', 1), alias = 'router' }, - }, - }, - { - uuid = helpers.uuid('b'), - alias = 's-1', - roles = { 'customers-storage', 'crud-storage' }, - servers = { - { instance_uuid = helpers.uuid('b', 1), alias = 's1-master' }, - { instance_uuid = helpers.uuid('b', 2), alias = 's1-replica' }, - }, - }, - { - uuid = helpers.uuid('c'), - alias = 's-2', - roles = { 'customers-storage', 'crud-storage' }, - servers = { - { instance_uuid = helpers.uuid('c', 1), alias = 's2-master' }, - { instance_uuid = helpers.uuid('c', 2), alias = 's2-replica' }, - }, - }, - { - uuid = helpers.uuid('d'), - alias = 's-3', - roles = { 'customers-storage', 'crud-storage' }, - servers = { - { instance_uuid = helpers.uuid('d', 1), alias = 's3-master' }, - { instance_uuid = helpers.uuid('d', 2), alias = 's3-replica' }, - }, - } - }, - }) - g.cluster:start() + helpers.start_cluster(g, cartridge_cfg_template, vshard_cfg_template) - g.router = g.cluster:server('router').net_box + g.router = helpers.get_router(g.cluster, g.params.backend).net_box g.router:eval([[ rawset(_G, 'crud', require('crud')) @@ -259,8 +292,7 @@ g.after_each(function(g) end) g.after_all(function(g) - g.cluster:stop() - fio.rmtree(g.cluster.datadir) + helpers.stop_cluster(g.cluster, g.params.backend) visualize_report(g.total_report, 'STATISTICS PERFORMANCE REPORT', { columns = { @@ -1111,7 +1143,7 @@ for name, case in pairs(cases) do local connections = {} - local router = g.cluster:server('router') + local router = helpers.get_router(g.cluster, g.params.backend) for _ = 1, params.connection_count do local c = net_box:connect(router.net_box_uri, router.net_box_credentials) if c == 
nil then diff --git a/test/unit/call_test.lua b/test/unit/call_test.lua index e6a93880..3a5f16a4 100644 --- a/test/unit/call_test.lua +++ b/test/unit/call_test.lua @@ -1,45 +1,71 @@ local fio = require('fio') local t = require('luatest') -local g = t.group('call') local helpers = require('test.helper') -g.before_all = function() - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_say_hi'), - use_vshard = true, - replicasets = { - { - uuid = helpers.uuid('a'), - alias = 'router', - roles = { 'crud-router' }, - servers = { - { instance_uuid = helpers.uuid('a', 1), alias = 'router' }, +local pgroup = t.group('call', helpers.backend_matrix()) + +local vshard_cfg_template = { + sharding = { + { + replicas = { + ['s1-master'] = { + master = true, }, + ['s1-replica'] = {}, }, - { - uuid = helpers.uuid('b'), - alias = 's-1', - roles = { 'crud-storage' }, - servers = { - { instance_uuid = helpers.uuid('b', 1), alias = 's1-master' }, - { instance_uuid = helpers.uuid('b', 2), alias = 's1-replica' }, + }, + { + replicas = { + ['s2-master'] = { + master = true, }, + ['s2-replica'] = {}, }, - { - uuid = helpers.uuid('c'), - alias = 's-2', - roles = { 'crud-storage' }, - servers = { - { instance_uuid = helpers.uuid('c', 1), alias = 's2-master' }, - { instance_uuid = helpers.uuid('c', 2), alias = 's2-replica' }, - }, - } }, - }) - g.cluster:start() + }, + bucket_count = 3000, + all_init = helpers.entrypoint_vshard_all('srv_say_hi'), + crud_init = true, +} + +local cartridge_cfg_template = { + datadir = fio.tempdir(), + server_command = helpers.entrypoint_cartridge('srv_say_hi'), + use_vshard = true, + replicasets = { + { + uuid = helpers.uuid('a'), + alias = 'router', + roles = { 'crud-router' }, + servers = { + { instance_uuid = helpers.uuid('a', 1), alias = 'router' }, + }, + }, + { + uuid = helpers.uuid('b'), + alias = 's-1', + roles = { 'crud-storage' }, + servers = { + { instance_uuid = helpers.uuid('b', 1), alias = 
's1-master' }, + { instance_uuid = helpers.uuid('b', 2), alias = 's1-replica' }, + }, + }, + { + uuid = helpers.uuid('c'), + alias = 's-2', + roles = { 'crud-storage' }, + servers = { + { instance_uuid = helpers.uuid('c', 1), alias = 's2-master' }, + { instance_uuid = helpers.uuid('c', 2), alias = 's2-replica' }, + }, + } + }, +} + +pgroup.before_all(function(g) + helpers.start_cluster(g, cartridge_cfg_template, vshard_cfg_template) g.clear_vshard_calls = function() g.cluster.main_server.net_box:call('clear_vshard_calls') @@ -52,14 +78,13 @@ g.before_all = function() -- patch vshard.router.call* functions local vshard_call_names = {'callro', 'callbro', 'callre', 'callbre', 'callrw'} g.cluster.main_server.net_box:call('patch_vshard_calls', {vshard_call_names}) -end +end) -g.after_all = function() - g.cluster:stop() - fio.rmtree(g.cluster.datadir) -end +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) -g.test_map_non_existent_func = function() +pgroup.test_map_non_existent_func = function(g) local results, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') local call = require('crud.common.call') @@ -68,11 +93,11 @@ g.test_map_non_existent_func = function() ]]) t.assert_equals(results, nil) - t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-000000000000", true) + t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-00000000000%d", true) t.assert_str_contains(err.err, "Function non_existent_func is not registered") end -g.test_single_non_existent_func = function() +pgroup.test_single_non_existent_func = function(g) local results, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') local call = require('crud.common.call') @@ -81,11 +106,11 @@ g.test_single_non_existent_func = function() ]]) t.assert_equals(results, nil) - t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-000000000000", true) + t.assert_str_contains(err.err, "Failed for 
%w+%-0000%-0000%-0000%-00000000000%d", true) t.assert_str_contains(err.err, "Function non_existent_func is not registered") end -g.test_map_invalid_mode = function() +pgroup.test_map_invalid_mode = function(g) local results, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') local call = require('crud.common.call') @@ -97,7 +122,7 @@ g.test_map_invalid_mode = function() t.assert_str_contains(err.err, "Unknown call mode: invalid") end -g.test_single_invalid_mode = function() +pgroup.test_single_invalid_mode = function(g) local results, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') local call = require('crud.common.call') @@ -109,7 +134,7 @@ g.test_single_invalid_mode = function() t.assert_str_contains(err.err, "Unknown call mode: invalid") end -g.test_map_no_args = function() +pgroup.test_map_no_args = function(g) local results_map, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') local call = require('crud.common.call') @@ -120,10 +145,10 @@ g.test_map_no_args = function() t.assert_equals(err, nil) local results = helpers.get_results_list(results_map) t.assert_equals(#results, 2) - t.assert_items_include(results, {{"HI, handsome! I am s1-master"}, {"HI, handsome! I am s2-master"}}) + t.assert_items_include(results, {{"HI, handsome! I am 1"}, {"HI, handsome! I am 1"}}) end -g.test_args = function() +pgroup.test_args = function(g) local results_map, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') local call = require('crud.common.call') @@ -134,10 +159,10 @@ g.test_args = function() t.assert_equals(err, nil) local results = helpers.get_results_list(results_map) t.assert_equals(#results, 2) - t.assert_items_include(results, {{"HI, dokshina! I am s1-master"}, {"HI, dokshina! I am s2-master"}}) + t.assert_items_include(results, {{"HI, dokshina! I am 1"}, {"HI, dokshina! 
I am 1"}}) end -g.test_timeout = function() +pgroup.test_timeout = function(g) local timeout = 0.2 local results, err = g.cluster.main_server.net_box:eval([[ @@ -153,7 +178,7 @@ g.test_timeout = function() ]], {timeout + 0.1, timeout}) t.assert_equals(results, nil) - t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-000000000000", true) + t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-00000000000%d", true) helpers.assert_timeout_error(err.err) end @@ -185,7 +210,7 @@ local function check_map_vshard_call(g, exp_vshard_call, opts) t.assert_equals(vshard_calls, {exp_vshard_call, exp_vshard_call}) end -g.test_single_vshard_calls = function() +pgroup.test_single_vshard_calls = function(g) -- mode: write check_single_vshard_call(g, 'callrw', { @@ -224,7 +249,7 @@ g.test_single_vshard_calls = function() }) end -g.test_map_vshard_calls = function() +pgroup.test_map_vshard_calls = function(g) -- mode: write check_map_vshard_call(g, 'callrw', { @@ -263,7 +288,7 @@ g.test_map_vshard_calls = function() }) end -g.test_any_vshard_call = function() +pgroup.test_any_vshard_call = function(g) g.clear_vshard_calls() local results, err = g.cluster.main_server.net_box:eval([[ local vshard = require('vshard') @@ -272,11 +297,11 @@ g.test_any_vshard_call = function() return call.any(vshard.router.static, 'say_hi_politely', {'dude'}, {}) ]]) - t.assert_equals(results, 'HI, dude! I am s2-master') + t.assert_equals(results, 'HI, dude! 
I am 1') t.assert_equals(err, nil) end -g.test_any_vshard_call_timeout = function() +pgroup.test_any_vshard_call_timeout = function(g) local timeout = 0.2 local results, err = g.cluster.main_server.net_box:eval([[ @@ -291,6 +316,6 @@ g.test_any_vshard_call_timeout = function() ]], {timeout + 0.1, timeout}) t.assert_equals(results, nil) - t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-000000000000", true) + t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-00000000000%d", true) helpers.assert_timeout_error(err.err) end diff --git a/test/unit/not_initialized_test.lua b/test/unit/not_initialized_test.lua index 60ebcb5b..d7f16657 100644 --- a/test/unit/not_initialized_test.lua +++ b/test/unit/not_initialized_test.lua @@ -1,49 +1,64 @@ local fio = require('fio') - +local helpers = require('test.helper') local t = require('luatest') -local g = t.group('not-initialized') -local helpers = require('test.helper') +local pgroup = t.group('not-initialized', helpers.backend_matrix({ + {}, +})) -g.before_all = function() - g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_not_initialized'), - use_vshard = true, - replicasets = { - { - uuid = helpers.uuid('a'), - alias = 'router', - roles = { 'vshard-router' }, - servers = { - { instance_uuid = helpers.uuid('a', 1), alias = 'router' }, +local vshard_cfg_template = { + sharding = { + { + replicas = { + storage = { + master = true, }, }, - { - uuid = helpers.uuid('b'), - alias = 's-1', - roles = { 'customers-storage' }, - servers = { - { instance_uuid = helpers.uuid('b', 1), alias = 's1-master' }, - }, + }, + }, + bucket_count = 20, + storage_init = helpers.entrypoint_vshard_storage('srv_not_initialized'), +} + +local cartridge_cfg_template = { + datadir = fio.tempdir(), + server_command = helpers.entrypoint_cartridge('srv_not_initialized'), + use_vshard = true, + replicasets = { + { + uuid = helpers.uuid('a'), + alias = 'router', + roles = { 
'vshard-router' }, + servers = { + { instance_uuid = helpers.uuid('a', 1), alias = 'router' }, }, }, - }) - g.cluster:start() -end + { + uuid = helpers.uuid('b'), + alias = 's-1', + roles = { 'customers-storage' }, + servers = { + { instance_uuid = helpers.uuid('b', 1), alias = 's1-master' }, + }, + }, + }, +} -g.after_all = function() - g.cluster:stop() - fio.rmtree(g.cluster.datadir) -end +pgroup.before_all(function(g) + helpers.start_cluster(g, cartridge_cfg_template, vshard_cfg_template) +end) + +pgroup.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) -g.test_insert = function() +pgroup.test_insert = function(g) local results, err = g.cluster.main_server.net_box:eval([[ local crud = require('crud') return crud.insert('customers', {id = 1, name = 'Fedor', age = 15}) ]]) t.assert_equals(results, nil) - t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-000000000000", true) + t.assert_str_contains(err.err, "Failed for %w+%-0000%-0000%-0000%-00000000000%d", true) t.assert_str_contains(err.err, "crud isn't initialized on replicaset") end diff --git a/test/unit/select_filters_test.lua b/test/unit/select_filters_test.lua index c2cb7c1e..a8272c8e 100644 --- a/test/unit/select_filters_test.lua +++ b/test/unit/select_filters_test.lua @@ -789,7 +789,7 @@ return M]] local filter_func = select_filters.internal.compile(filter_code) t.assert_equals({ filter_func(box.tuple.new({1, 2, {f2 = {f3 = "a"}}, {f3 = "b"}})) }, {true, false}) - t.assert_equals({ filter_func(box.tuple.new({1, 2, {f3 = "b"}}, {f2 = {f3 = "a"}})) }, {false, true}) + t.assert_equals({ filter_func(box.tuple.new({1, 2, {f3 = "b"}, {f2 = {f3 = "a"}}})) }, {false, true}) t.assert_equals({ filter_func(box.tuple.new({1, 2, {f2 = {f3 = "a"}}, "b"})) }, {false, true}) end diff --git a/test/unit/sharding_metadata_test.lua b/test/unit/sharding_metadata_test.lua index ad321d1e..be2ce2a3 100644 --- a/test/unit/sharding_metadata_test.lua +++ 
b/test/unit/sharding_metadata_test.lua @@ -45,14 +45,27 @@ g.before_each(function() box.schema.space.create('fetch_on_storage') end) +-- Since Tarantool 3.0 triggers still live after a space drop. To properly +-- clean up for the unit tests we need to remove all triggers from +-- the space. This is necessary because `crud` adds its own triggers to the +-- `ddl` spaces. +-- +-- In practice `ddl` does not drop these spaces so it is a problem for the tests. +local function drop_ddl_space(space) + for _, t in pairs(space:on_replace()) do + space:on_replace(nil, t) + end + space:drop() +end + g.after_each(function() -- Cleanup. if box.space._ddl_sharding_key ~= nil then - box.space._ddl_sharding_key:drop() + drop_ddl_space(box.space._ddl_sharding_key) end if box.space._ddl_sharding_func ~= nil then - box.space._ddl_sharding_func:drop() + drop_ddl_space(box.space._ddl_sharding_func) end box.space.fetch_on_storage:drop() diff --git a/test/unit/stats_test.lua b/test/unit/stats_test.lua index b8899a92..a66bf8ad 100644 --- a/test/unit/stats_test.lua +++ b/test/unit/stats_test.lua @@ -1,30 +1,24 @@ local clock = require('clock') -local fio = require('fio') local fun = require('fun') +local helpers = require('test.helper') local t = require('luatest') local stats_module = require('crud.stats') -local pgroup = t.group('stats_unit', { +local pgroup = t.group('stats_unit', helpers.backend_matrix({ { driver = 'local' }, { driver = 'metrics', quantiles = false }, { driver = 'metrics', quantiles = true }, -}) -local group_driver = t.group('stats_driver_unit') -local helpers = require('test.helper') +})) +local group_driver = t.group('stats_driver_unit', helpers.backend_matrix({ + {}, +})) local space_name = 'customers' local function before_all(g) - -- Enable test cluster for "is space exist?" checks.
- g.cluster = helpers.Cluster:new({ - datadir = fio.tempdir(), - server_command = helpers.entrypoint('srv_stats'), - use_vshard = true, - replicasets = helpers.get_test_replicasets(), - }) - g.cluster:start() - g.router = g.cluster:server('router').net_box + helpers.start_default_cluster(g, 'srv_stats') + g.router = helpers.get_router(g.cluster, g.params.backend).net_box helpers.prepare_simple_functions(g.router) g.router:eval("stats_module = require('crud.stats')") @@ -39,7 +33,7 @@ local function before_all(g) end local function after_all(g) - helpers.stop_cluster(g.cluster) + helpers.stop_cluster(g.cluster, g.params.backend) end local function get_stats(g, space_name) @@ -48,6 +42,10 @@ end local function enable_stats(g, params) params = params or g.params + if params ~= nil then + params = table.deepcopy(params) + params.backend = nil + end g.router:eval("stats_module.enable(...)", { params }) end diff --git a/test/vshard_helpers/cluster.lua b/test/vshard_helpers/cluster.lua new file mode 100644 index 00000000..dee73699 --- /dev/null +++ b/test/vshard_helpers/cluster.lua @@ -0,0 +1,113 @@ +local fio = require('fio') +local Server = require('test.vshard_helpers.server') + +local root = os.environ()['SOURCEDIR'] or '.' 
+ +local Cluster = {} + +function Cluster:new(object) + self:inherit(object) + object:initialize() + self.servers = object.servers + self.built_servers = object.built_servers + return object +end + +function Cluster:inherit(object) + object = object or {} + setmetatable(object, self) + self.__index = self + self.servers = {} + self.built_servers = {} + return object +end + +function Cluster:initialize() + self.servers = {} +end + +function Cluster:server(alias) + for _, server in ipairs(self.servers) do + if server.alias == alias then + return server + end + end + return nil +end + +function Cluster:drop() + for _, server in ipairs(self.servers) do + if server ~= nil then + server:stop() + server:cleanup() + end + end +end + +function Cluster:get_index(server) + local index = nil + for i, v in ipairs(self.servers) do + if (v.id == server) then + index = i + end + end + return index +end + +function Cluster:delete_server(server) + local idx = self:get_index(server) + if idx == nil then + print("Key does not exist") + else + table.remove(self.servers, idx) + end +end + +function Cluster:stop() + for _, server in ipairs(self.servers) do + if server ~= nil then + server:stop() + end + end +end + +function Cluster:start(opts) + for _, server in ipairs(self.servers) do + if not server.process then + server:start({wait_for_readiness = false}) + end + end + + -- The option is true by default. 
+ local wait_for_readiness = true + if opts ~= nil and opts.wait_for_readiness ~= nil then + wait_for_readiness = opts.wait_for_readiness + end + + if wait_for_readiness then + for _, server in ipairs(self.servers) do + server:wait_for_readiness() + end + end +end + +function Cluster:build_server(server_config, instance_file) + if instance_file == nil then + error('instance_file must be set') + end + server_config = table.deepcopy(server_config) + server_config.command = fio.pathjoin(root, 'test/vshard_helpers/instances/', instance_file) + assert(server_config.alias, 'Either replicaset.alias or server.alias must be given') + local server = Server:new(server_config) + table.insert(self.built_servers, server) + return server +end + +function Cluster:add_server(server) + if self:server(server.alias) ~= nil then + error('Alias is not provided') + end + table.insert(self.servers, server) +end + +return Cluster diff --git a/test/vshard_helpers/instances/router.lua b/test/vshard_helpers/instances/router.lua new file mode 100755 index 00000000..c92c25a7 --- /dev/null +++ b/test/vshard_helpers/instances/router.lua @@ -0,0 +1,28 @@ +local fio = require('fio') + +local appdir = fio.abspath(debug.sourcedir() .. '/../../../') +if package.setsearchroot ~= nil then + package.setsearchroot(appdir) +else + package.path = package.path .. appdir .. '/?.lua;' + package.path = package.path .. appdir .. '/?/init.lua;' + package.path = package.path .. appdir .. '/.rocks/share/tarantool/?.lua;' + package.path = package.path .. appdir .. '/.rocks/share/tarantool/?/init.lua;' + package.cpath = package.cpath .. appdir .. '/?.so;' + package.cpath = package.cpath .. appdir .. '/?.dylib;' + package.cpath = package.cpath .. appdir .. '/.rocks/lib/tarantool/?.so;' + package.cpath = package.cpath .. appdir .. 
'/.rocks/lib/tarantool/?.dylib;' +end + +local utils = require('test.vshard_helpers.instances.utils') + +-- For some reason, shutdown hangs on new Tarantools even though the nodes do not seem to +-- have any long requests running. +if box.ctl.set_on_shutdown_timeout then + box.ctl.set_on_shutdown_timeout(0.001) +end + +box.cfg(utils.box_cfg()) +box.schema.user.grant('guest', 'super', nil, nil, {if_not_exists = true}) + +_G.ready = true diff --git a/test/vshard_helpers/instances/storage.lua b/test/vshard_helpers/instances/storage.lua new file mode 100644 index 00000000..175bbda8 --- /dev/null +++ b/test/vshard_helpers/instances/storage.lua @@ -0,0 +1,33 @@ +local fio = require('fio') + +local appdir = fio.abspath(debug.sourcedir() .. '/../../../') +if package.setsearchroot ~= nil then + package.setsearchroot(appdir) +else + package.path = package.path .. appdir .. '/?.lua;' + package.path = package.path .. appdir .. '/?/init.lua;' + package.path = package.path .. appdir .. '/.rocks/share/tarantool/?.lua;' + package.path = package.path .. appdir .. '/.rocks/share/tarantool/?/init.lua;' + package.cpath = package.cpath .. appdir .. '/?.so;' + package.cpath = package.cpath .. appdir .. '/?.dylib;' + package.cpath = package.cpath .. appdir .. '/.rocks/lib/tarantool/?.so;' + package.cpath = package.cpath .. appdir .. '/.rocks/lib/tarantool/?.dylib;' +end + +local utils = require('test.vshard_helpers.instances.utils') + +-- It is not necessary in fact, but it simplifies `callrw` calls in tests. +_G.vshard = { + storage = require('vshard.storage'), +} + +-- For some reason, shutdown hangs on new Tarantools even though the nodes do not seem to +-- have any long requests running.
+if box.ctl.set_on_shutdown_timeout then + box.ctl.set_on_shutdown_timeout(0.001) +end + +box.cfg(utils.box_cfg()) +box.schema.user.grant('guest', 'super', nil, nil, {if_not_exists = true}) + +_G.ready = true diff --git a/test/vshard_helpers/instances/utils.lua b/test/vshard_helpers/instances/utils.lua new file mode 100644 index 00000000..c85d589b --- /dev/null +++ b/test/vshard_helpers/instances/utils.lua @@ -0,0 +1,28 @@ +local fun = require('fun') +local json = require('json') + +local utils = {} + +local function default_cfg() + return { + work_dir = os.getenv('TARANTOOL_WORKDIR'), + listen = os.getenv('TARANTOOL_LISTEN'), + log = ('%s/%s.log'):format(os.getenv('TARANTOOL_WORKDIR'), os.getenv('TARANTOOL_ALIAS')), + } +end + +local function env_cfg() + local src = os.getenv('TARANTOOL_BOX_CFG') + if src == nil then + return {} + end + local res = json.decode(src) + assert(type(res) == 'table') + return res +end + +function utils.box_cfg(cfg) + return fun.chain(default_cfg(), env_cfg(), cfg or {}):tomap() +end + +return utils diff --git a/test/vshard_helpers/server.lua b/test/vshard_helpers/server.lua new file mode 100644 index 00000000..272efa2b --- /dev/null +++ b/test/vshard_helpers/server.lua @@ -0,0 +1,351 @@ +local clock = require('clock') +local digest = require('digest') +local ffi = require('ffi') +local fiber = require('fiber') +local fio = require('fio') +local fun = require('fun') +local json = require('json') +local errno = require('errno') +local log = require('log') +local yaml = require('yaml') + +local checks = require('checks') +local luatest = require('luatest') + +ffi.cdef([[ + int kill(pid_t pid, int sig); +]]) + +local Server = luatest.Server:inherit({}) + +local WAIT_TIMEOUT = 60 +local WAIT_DELAY = 0.1 + +-- Differences from luatest.Server: +-- +-- * 'alias' is mandatory. +-- * 'datadir' is optional, specifies a directory: if specified, the directory's +-- contents will be recursively copied into 'workdir' during initialization. 
+-- * 'workdir' is optional, determined by 'alias'. +-- * The new 'box_cfg' parameter. +-- * engine - provides engine for parameterized tests +Server.constructor_checks = fun.chain(Server.constructor_checks, { + alias = 'string', + command = '?string', + datadir = '?string', + workdir = '?string', + box_cfg = '?table', + engine = '?string', +}):tomap() + +Server.socketdir = fio.abspath(os.getenv('VARDIR') or '/tmp/t') + +function Server.build_instance_uri(alias) + return ('%s/%s.iproto'):format(Server.socketdir, alias) +end + +function Server:initialize() + if self.id == nil then + local random = digest.urandom(9) + self.id = digest.base64_encode(random, {urlsafe = true}) + end + if self.command == nil then + error("command required") + end + if self.workdir == nil then + self.workdir = ('%s/%s-%s'):format(self.socketdir, self.alias, self.id) + fio.rmtree(self.workdir) + fio.mktree(self.workdir) + end + if self.datadir ~= nil then + local ok, err = fio.copytree(self.datadir, self.workdir) + if not ok then + error(string.format('Failed to copy directory: %s', err)) + end + self.datadir = nil + end + if self.net_box_port == nil and self.net_box_uri == nil then + self.net_box_uri = self.build_instance_uri(self.alias) + fio.mktree(self.socketdir) + end + + -- AFAIU, the inner getmetatable() returns our helpers.Server + -- class, the outer one returns luatest.Server class. + getmetatable(getmetatable(self)).initialize(self) +end + +--- Generates environment to run process with. +-- The result is merged into os.environ(). +-- @return map +function Server:build_env() + local res = getmetatable(getmetatable(self)).build_env(self) + if self.box_cfg ~= nil then + res.TARANTOOL_BOX_CFG = json.encode(self.box_cfg) + end + res.ENGINE = self.engine + return res +end + +local function wait_cond(cond_name, server, func, ...) 
+ local alias = server.alias + local id = server.id + local pid = server.process.pid + + local deadline = clock.time() + WAIT_TIMEOUT + while true do + if func(...) then + return + end + if clock.time() > deadline then + error(('Waiting for "%s" on server %s-%s (PID %d) timed out') + :format(cond_name, alias, id, pid)) + end + fiber.sleep(WAIT_DELAY) + end +end + +function Server:wait_for_readiness() + return wait_cond('readiness', self, function() + local ok, is_ready = pcall(function() + self:connect_net_box() + return self.net_box:eval('return _G.ready') == true + end) + return ok and is_ready + end) +end + +function Server:wait_election_leader() + -- Include read-only property too because if an instance is a leader, it + -- does not mean it finished the synchro queue ownership transition. It is + -- read-only until that happens. But in tests usually the leader is needed + -- as a writable node. + return wait_cond('election leader', self, self.exec, self, function() + return box.info.election.state == 'leader' and not box.info.ro + end) +end + +function Server:wait_election_leader_found() + return wait_cond('election leader is found', self, self.exec, self, + function() return box.info.election.leader ~= 0 end) +end + +function Server:wait_election_term(term) + return wait_cond('election term', self, self.exec, self, function(term) + return box.info.election.term >= term + end, {term}) +end + +function Server:wait_synchro_queue_term(term) + return wait_cond('synchro queue term', self, self.exec, self, function(term) + return box.info.synchro.queue.term >= term + end, {term}) +end + +-- Unlike the original luatest.Server function it waits for +-- starting the server. +function Server:start(opts) + checks('table', { + wait_for_readiness = '?boolean', + }) + getmetatable(getmetatable(self)).start(self) + + -- The option is true by default. 
+ local wait_for_readiness = true + if opts ~= nil and opts.wait_for_readiness ~= nil then + wait_for_readiness = opts.wait_for_readiness + end + + if wait_for_readiness then + self:wait_for_readiness() + end +end + +function Server:instance_id() + -- Cache the value when found it first time. + if self.instance_id_value then + return self.instance_id_value + end + local id = self:exec(function() return box.info.id end) + -- But do not cache 0 - it is an anon instance, its ID might change. + if id ~= 0 then + self.instance_id_value = id + end + return id +end + +function Server:instance_uuid() + -- Cache the value when found it first time. + if self.instance_uuid_value then + return self.instance_uuid_value + end + local uuid = self:exec(function() return box.info.uuid end) + self.instance_uuid_value = uuid + return uuid +end + +function Server:replicaset_uuid() + -- Cache the value when found it first time. + if self.replicaset_uuid_value then + return self.replicaset_uuid_value + end + local uuid = self:exec(function() + local info = box.info + if info.replicaset then + return info.replicaset.uuid + end + return info.cluster.uuid + end) + if uuid == nil then + -- Probably didn't bootstrap yet. Listen starts before replicaset UUID + -- is assigned. + return nil + end + self.replicaset_uuid_value = uuid + return uuid +end + +function Server:election_term() + return self:exec(function() return box.info.election.term end) +end + +function Server:synchro_queue_term() + return self:exec(function() return box.info.synchro.queue.term end) +end + +-- TODO: Add the 'wait_for_readiness' parameter for the restart() +-- method. + +-- Unlike the original luatest.Server function it waits until +-- the server will stop. 
+function Server:stop() + local alias = self.alias + local id = self.id + if self.process then + local pid = self.process.pid + getmetatable(getmetatable(self)).stop(self) + + local deadline = clock.time() + WAIT_TIMEOUT + while true do + if ffi.C.kill(pid, 0) ~= 0 then + break + end + if clock.time() > deadline then + error(('Stopping of server %s-%s (PID %d) was timed out'):format( + alias, id, pid)) + end + fiber.sleep(WAIT_DELAY) + end + end +end + +function Server:cleanup() + fio.rmtree(self.workdir) + self.instance_id_value = nil + self.instance_uuid_value = nil + self.replicaset_uuid_value = nil +end + +function Server:drop() + self:stop() + self:cleanup() +end + +-- A copy of test_run:grep_log. +function Server:grep_log(what, bytes, opts) + local opts = opts or {} + local noreset = opts.noreset or false + -- if instance has crashed provide filename to use grep_log + local filename = opts.filename or self:eval('return box.cfg.log') + local file = fio.open(filename, {'O_RDONLY', 'O_NONBLOCK'}) + + local function fail(msg) + local err = errno.strerror() + if file ~= nil then + file:close() + end + error(string.format("%s: %s: %s", msg, filename, err)) + end + + if file == nil then + fail("Failed to open log file") + end + io.flush() -- attempt to flush stdout == log fd + local filesize = file:seek(0, 'SEEK_END') + if filesize == nil then + fail("Failed to get log file size") + end + local bytes = bytes or 65536 -- don't read whole log - it can be huge + bytes = bytes > filesize and filesize or bytes + if file:seek(-bytes, 'SEEK_END') == nil then + fail("Failed to seek log file") + end + local found, buf + repeat -- read file in chunks + local s = file:read(2048) + if s == nil then + fail("Failed to read log file") + end + local pos = 1 + repeat -- split read string in lines + local endpos = string.find(s, '\n', pos) + endpos = endpos and endpos - 1 -- strip terminating \n + local line = string.sub(s, pos, endpos) + if endpos == nil and s ~= '' then + -- line 
doesn't end with \n or eof, append it to buffer + -- to be checked on next iteration + buf = buf or {} + table.insert(buf, line) + else + if buf ~= nil then -- prepend line with buffered data + table.insert(buf, line) + line = table.concat(buf) + buf = nil + end + if string.match(line, "Starting instance") and not noreset then + found = nil -- server was restarted, reset search + else + found = string.match(line, what) or found + end + end + pos = endpos and endpos + 2 -- jump to char after \n + until pos == nil + until s == '' + file:close() + return found +end + +function Server:get_vclock() + return self:exec(function() return box.info.vclock end) +end + +function Server:wait_vclock(to_vclock) + while true do + local vclock = self:get_vclock() + local ok = true + + for server_id, to_lsn in pairs(to_vclock) do + local lsn = vclock[server_id] + if lsn == nil or lsn < to_lsn then + ok = false + break + end + end + + if ok then + return + end + + log.info("wait vclock: %s to %s", + yaml.encode(vclock), yaml.encode(to_vclock)) + fiber.sleep(0.001) + end +end + +function Server:wait_vclock_of(other_server) + local vclock = other_server:get_vclock() + -- First component is for local changes. 
+ vclock[0] = nil + return self:wait_vclock(vclock) +end + +return Server diff --git a/test/vshard_helpers/vtest.lua b/test/vshard_helpers/vtest.lua new file mode 100644 index 00000000..dc717c77 --- /dev/null +++ b/test/vshard_helpers/vtest.lua @@ -0,0 +1,878 @@ +local t = require('luatest') +local cluster = require('test.vshard_helpers.cluster') +local fio = require('fio') +local fiber = require('fiber') +local uuid = require('uuid') +local yaml = require('yaml') +local vrepset = require('vshard.replicaset') +local log = require('log') + +local SOCKET_DIR = fio.abspath(os.getenv('VARDIR') or '/tmp/t') +local function instance_uri(alias, instance_id) + if instance_id == nil then + instance_id = '' + end + instance_id = tostring(instance_id) + return ('%s/%s%s.iproto'):format(SOCKET_DIR, alias, instance_id); +end + +local wait_timeout = 50 +-- Use it in busy-loops like `while !cond do fiber.sleep(busy_step) end`. +local busy_step = 0.005 +local uuid_idx = 1 +-- +-- The maps help to preserve the same UUID for replicas and replicasets during +-- reconfiguration. Reconfig means an update of a cfg template which doesn't +-- contain UUIDs + generation of a new real cfg to apply on nodes. The real cfg +-- needs to have same UUIDs for the nodes used in the old versions of the +-- template. +-- +local replica_name_to_uuid_map = {} +local replicaset_name_to_uuid_map = {} + +local cert_dir = fio.pathjoin(fio.cwd(), './test/certs') +local ssl_ca_file = fio.pathjoin(cert_dir, 'ca.crt') +local ssl_server_cert_file = fio.pathjoin(cert_dir, 'server.crt') +local ssl_server_key_file = fio.pathjoin(cert_dir, 'server.key') + +local function uuid_str_from_int(i) + i = tostring(i) + assert(#i <= 12) + return '00000000-0000-0000-0000-'..string.rep('0', 12 - #i)..i +end + +local function uuid_from_int(i) + return uuid.fromstr(uuid_str_from_int(i)) +end + +-- +-- New UUID unique per this process. Generation is not random - for simplicity +-- and reproducibility. 
+-- +local function uuid_str_next() + local i = uuid_idx + uuid_idx = uuid_idx + 1 + return uuid_str_from_int(i) +end + +local function name_to_uuid(map, name) + local res = map[name] + if not res then + res = uuid_str_next() + map[name] = res + end + return res +end + +local function replica_name_to_uuid(name) + return name_to_uuid(replica_name_to_uuid_map, name) +end + +local function replicaset_name_to_uuid(name) + return name_to_uuid(replicaset_name_to_uuid_map, name) +end + +-- +-- Timeout error can be a ClientError with ER_TIMEOUT code or a TimedOut error +-- which is ER_SYSTEM. They also have different messages. Same public APIs can +-- return both errors depending on core version and/or error cause. This func +-- helps not to care. +-- +local function error_is_timeout(err) + return err.code == box.error.TIMEOUT or (err.code == box.error.PROC_LUA and + err.message == 'Timeout exceeded') or err.type == 'TimedOut' +end + +-- +-- Build a valid vshard config by a template. A template does not specify +-- anything volatile such as URIs, UUIDs - these are installed at runtime. +-- +local function config_new(templ) + local res = table.deepcopy(templ) + local sharding = {} + res.sharding = sharding + -- Is supposed to intensify reconnects when replication and listen URIs + -- change. 
+ res.replication_timeout = 0.1 + for i, replicaset_templ in pairs(templ.sharding) do + local replicaset_uuid = replicaset_name_to_uuid(i) + local replicas = {} + local replicaset = table.deepcopy(replicaset_templ) + replicaset.replicas = replicas + replicaset.is_ssl = nil + local is_ssl = replicaset_templ.is_ssl + for replica_name, replica_templ in pairs(replicaset_templ.replicas) do + local replica_uuid = replica_name_to_uuid(replica_name) + if replica_templ.instance_uuid ~= nil then + replica_uuid = replica_templ.instance_uuid + end + local replica = table.deepcopy(replica_templ) + replica.port_uri = nil + replica.port_count = nil + replica.instance_uuid = nil + replica.name = replica_name + + local port_count = replica_templ.port_count + local creds = 'storage:storage@' + if port_count == nil then + replica.uri = creds..instance_uri(replica_name) + else + local listen = table.new(port_count, 0) + for j = 1, port_count do + listen[j] = creds..instance_uri(replica_name..j) + end + replica.listen = listen + replica.uri = listen[replica_templ.port_uri] + end + if is_ssl then + if not replica.listen then + replica.listen = {replica.uri} + end + for j, listen in pairs(replica.listen) do + replica.listen[j] = { + listen, + params = { + transport = 'ssl', + ssl_cert_file = ssl_server_cert_file, + ssl_key_file = ssl_server_key_file, + }, + } + end + replica.uri = { + replica.uri, + params = { + transport = 'ssl', + ssl_ca_file = ssl_ca_file, + } + } + end + replicas[replica_uuid] = replica + end + sharding[replicaset_uuid] = replicaset + end + return res +end + +-- +-- Apply the config on the given router. +-- +local function router_cfg(router, cfg) + router:exec(function(cfg) + require('vshard.router').cfg(cfg) + end, {cfg}) +end + +-- +-- Create a new router in the cluster. 
+-- If no cfg was passed configuration should be done manually with server:exec +-- +local function router_new(g, name, cfg) + if not g.cluster then + g.cluster = cluster:new({}) + end + local server = g.cluster:build_server({ + alias = name, + engine = cfg.engine, + }, 'router.lua') + + g.cluster[name] = server + g.cluster:add_server(server) + server:start() + if cfg then + router_cfg(server, cfg) + end + return server +end + +-- +-- Start a main_server router. +-- +local function start_main_server(g, cfg, opts) + local cfg = table.deepcopy(cfg) + cfg.engine = nil + + local router = router_new(g, 'main_server', cfg) + if opts.router_init ~= nil then + router:exec(function(router_init) + require(router_init)() + end, {opts.router_init}) + end + if opts.all_init ~= nil then + router:exec(function(all_init) + require(all_init)() + end, {opts.all_init}) + end + if opts.crud_init then + router:exec(function() + require('crud').init_router() + end) + end +end + +local function storage_boot_one_f(first, count) + return require('vshard.storage').bucket_force_create(first, count) +end + +-- +-- Bootstrap the cluster without a router by a given config. In theory the +-- config could be fetched from the storages, but it would force to check its +-- consistency. 
+-- +local function cluster_bootstrap(g, cfg) + local masters = {} + local etalon_balance = {} + local replicaset_count = 0 + for rs_uuid, rs in pairs(cfg.sharding) do + local is_master_found = false + for _, rep in pairs(rs.replicas) do + if rep.master then + t.assert(not is_master_found, 'only one master') + local server = g.cluster[rep.name] + t.assert_not_equals(server, nil, 'find master instance') + t.assert_equals(server:replicaset_uuid(), rs_uuid, + 'replicaset uuid') + masters[rs_uuid] = server + is_master_found = true + end + end + t.assert(is_master_found, 'found master') + local weight = rs.weight + if weight == nil then + weight = 1 + end + etalon_balance[rs_uuid] = { + weight = weight + } + replicaset_count = replicaset_count + 1 + end + t.assert_not_equals(masters, {}, 'have masters') + vrepset.calculate_etalon_balance(etalon_balance, cfg.bucket_count) + local fibers = table.new(0, replicaset_count) + local bid = 1 + for rs_uuid, rs in pairs(etalon_balance) do + local master = masters[rs_uuid] + local count = rs.etalon_bucket_count + local f = fiber.new(master.exec, master, storage_boot_one_f, + {bid, count}) + f:set_joinable(true) + fibers[master.vtest.name] = f + bid = bid + count + end + local errors = {} + for name, f in pairs(fibers) do + local ok, res1, res2 = f:join() + if not ok then + errors[name] = res1 + elseif res1 == nil then + errors[name] = res2 + else + t.assert_equals(res2, nil, 'boot_one no error') + t.assert(res1, 'boot_one success') + end + end + t.assert_equals(errors, {}, 'storage bootstrap') +end + + +-- +-- Find all vshard storages in the cluster. +-- +local function cluster_find_all(g) + local result = {} + for _, storage in pairs(g.cluster.servers) do + if storage.vtest and storage.vtest.is_storage then + table.insert(result, storage) + end + end + return result +end + +-- +-- Wait vclock sync in each replicaset between all its replicas. 
+--
+local function cluster_wait_vclock_all(g)
+    -- Group the storages by their replicaset uuid.
+    local replicasets = {}
+    for _, storage in pairs(cluster_find_all(g)) do
+        local uuid = storage:replicaset_uuid()
+        local replicaset = replicasets[uuid]
+        if not replicaset then
+            replicasets[uuid] = {storage}
+        else
+            table.insert(replicaset, storage)
+        end
+    end
+    -- Wait for every pair of replicas in both directions.
+    for _, replicaset in pairs(replicasets) do
+        for i = 1, #replicaset do
+            local s1 = replicaset[i]
+            for j = i + 1, #replicaset do
+                local s2 = replicaset[j]
+                s1:wait_vclock_of(s2)
+                s2:wait_vclock_of(s1)
+            end
+        end
+    end
+end
+
+--
+-- Build new cluster by a given config.
+--
+local function cluster_new(g, cfg)
+    if not g.cluster then
+        g.cluster = cluster:new({})
+    end
+    local all_servers = {}
+    local masters = {}
+    local replicas = {}
+
+    -- Extract the test-specific init hooks - they must not be passed to
+    -- vshard.storage.cfg() as a part of the config.
+    local storage_init = cfg.storage_init
+    local router_init = cfg.router_init
+    local all_init = cfg.all_init
+    local crud_init = cfg.crud_init
+
+    cfg.storage_init = nil
+    cfg.router_init = nil
+    cfg.all_init = nil
+    cfg.crud_init = nil
+
+    for replicaset_uuid, replicaset in pairs(cfg.sharding) do
+        -- Luatest depends on box.cfg being ready and listening. Need to
+        -- configure it before vshard.storage.cfg().
+        local box_repl = {}
+        for _, replica in pairs(replicaset.replicas) do
+            table.insert(box_repl, replica.uri)
+        end
+        local box_cfg = {
+            replication = box_repl,
+            -- Speed retries up.
+            replication_timeout = 0.1,
+        }
+        for replica_uuid, replica in pairs(replicaset.replicas) do
+            local name = replica.name
+            box_cfg.instance_uuid = replica_uuid
+            box_cfg.replicaset_uuid = replicaset_uuid
+            box_cfg.listen = instance_uri(replica.name)
+            -- Need to specify read-only explicitly to know who is the master.
+            box_cfg.read_only = not replica.master
+            box_cfg.memtx_use_mvcc_engine = cfg.memtx_use_mvcc_engine
+            local server = g.cluster:build_server({
+                alias = name,
+                box_cfg = box_cfg,
+                engine = cfg.engine,
+            }, 'storage.lua')
+
+            g.cluster[name] = server
+            -- VShard specific details to use in various helper functions.
+            server.vtest = {
+                name = name,
+                replicaset = replicaset_uuid,
+                is_storage = true,
+                master = replica.master,
+            }
+            g.cluster:add_server(server)
+
+            table.insert(all_servers, server)
+            if replica.master then
+                table.insert(masters, server)
+            else
+                table.insert(replicas, server)
+            end
+        end
+    end
+    -- Start everything first and await readiness afterwards so that the
+    -- instances boot in parallel.
+    for _, replica in pairs(all_servers) do
+        replica:start({wait_for_readiness = false})
+    end
+
+    -- Configure the masters first, then the replicas.
+    for _, master in pairs(masters) do
+        master:wait_for_readiness()
+        master:exec(function(cfg)
+            -- Logged in as guest with 'super' access rights. Yet 'super' is not
+            -- enough to grant 'replication' privilege. The simplest way - login
+            -- as admin for that temporary.
+            local user = box.session.user()
+            box.session.su('admin')
+
+            cfg.engine = nil
+            require('vshard.storage').cfg(cfg, box.info.uuid)
+            box.schema.user.grant('storage', 'super')
+
+            box.session.su(user)
+        end, {cfg})
+    end
+
+    for _, replica in pairs(replicas) do
+        replica:wait_for_readiness()
+        replica:exec(function(cfg)
+            cfg.engine = nil
+            require('vshard.storage').cfg(cfg, box.info.uuid)
+        end, {cfg})
+    end
+
+    -- Run the init hooks on all the configured storages.
+    for _, replica in pairs(all_servers) do
+        if storage_init ~= nil then
+            replica:exec(function(storage_init)
+                require(storage_init)()
+            end, {storage_init})
+        end
+        if all_init ~= nil then
+            replica:exec(function(all_init)
+                require(all_init)()
+            end, {all_init})
+        end
+        if crud_init then
+            replica:exec(function()
+                require('crud').init_storage()
+            end)
+        end
+    end
+
+    start_main_server(g, cfg, {
+        router_init = router_init,
+        all_init = all_init,
+        crud_init = crud_init,
+    })
+
+    cluster_bootstrap(g, cfg)
+    cluster_wait_vclock_all(g)
+end
+
+--
+-- Execute func(storage) in parallel for all the given storages.
+--
+local function cluster_for_each_in(storages, func)
+    local fibers = table.new(0, #storages)
+    -- Map-reduce. Parallel execution not only is faster but also helps not to
+    -- depend on which order would be non-blocking. For example, at storage
+    -- reconfiguration there might be a config which makes the master hang until
+    -- some replica is configured first. When all are done in parallel, it won't
+    -- matter.
+    for _, storage in pairs(storages) do
+        local name = storage.vtest.name
+        local f = fiber.new(func, storage)
+        f:set_joinable(true)
+        fibers[name] = f
+    end
+    local result = table.new(0, #storages)
+    local errors = {}
+    for name, f in pairs(fibers) do
+        local ok, res = f:join()
+        if not ok then
+            errors[name] = res
+        else
+            result[name] = res
+        end
+    end
+    if not next(errors) then
+        return result
+    end
+    return nil, errors
+end
+
+--
+-- Execute func(storage) in parallel for all storages.
+--
+local function cluster_for_each(g, func)
+    return cluster_for_each_in(cluster_find_all(g), func)
+end
+
+--
+-- Execute storage:exec(func, args) in parallel for all storages.
+--
+local function cluster_exec_each(g, func, args)
+    return cluster_for_each(g, function(storage)
+        return storage:exec(func, args)
+    end)
+end
+
+--
+-- Find all vshard storage masters in the cluster.
+--
+local function cluster_find_all_masters(g)
+    local res, err = cluster_for_each(g, function(storage)
+        return storage:call('vshard.storage._call', {'info'}).is_master
+    end)
+    if not res then
+        return nil, err
+    end
+    local masters = {}
+    for name, is_master in pairs(res) do
+        if is_master then
+            local server = g.cluster[name]
+            t.assert_not_equals(server, nil, 'find master instance')
+            table.insert(masters, server)
+        end
+    end
+    return masters
+end
+
+--
+-- Execute func(storage) in parallel for all master storages.
+--
+local function cluster_for_each_master(g, func)
+    local masters, err = cluster_find_all_masters(g)
+    if not masters then
+        return nil, err
+    end
+    return cluster_for_each_in(masters, func)
+end
+
+--
+-- Execute storage:exec(func, args) in parallel for all master storages.
+--
+local function cluster_exec_each_master(g, func, args)
+    return cluster_for_each_master(g, function(storage)
+        return storage:exec(func, args)
+    end)
+end
+
+--
+-- Apply the config to all vshard storages in the cluster.
+--
+local function cluster_cfg(g, cfg)
+    -- No support yet for dynamic node addition and removal. Only reconfig.
+    local _, err = cluster_exec_each(g, function(cfg)
+        return require('vshard.storage').cfg(cfg, box.info.uuid)
+    end, {cfg})
+    t.assert_equals(err, nil, 'storage reconfig')
+end
+
+--
+-- Find first active bucket on the storage. In tests it helps not to assume
+-- where the buckets are located by hardcoded numbers and uuids.
+-- Relies on the get_first_bucket() helper defined in the storage instance's
+-- global environment.
+--
+local function storage_first_bucket(storage)
+    return storage:exec(function()
+        return _G.get_first_bucket()
+    end)
+end
+
+--
+-- Disable rebalancer on all storages.
+--
+local function cluster_rebalancer_disable(g)
+    local _, err = cluster_exec_each(g, function()
+        require('vshard.storage').rebalancer_disable()
+    end)
+    t.assert_equals(err, nil, 'rebalancer disable')
+end
+
+--
+-- Enable rebalancer on all storages.
+--
+local function cluster_rebalancer_enable(g)
+    local _, err = cluster_exec_each(g, function()
+        require('vshard.storage').rebalancer_enable()
+    end)
+    t.assert_equals(err, nil, 'rebalancer enable')
+end
+
+--
+-- Wait until the instance follows the master having the given instance ID.
+--
+local function storage_wait_follow_f(id)
+    local deadline = fiber.clock() + wait_timeout
+    local last_err
+    while true do
+        local info = box.info.replication[id]
+        local stream, status
+        if info == nil then
+            last_err = 'Not found replication info'
+            goto retry
+        end
+        stream = info.upstream
+        if stream == nil then
+            last_err = 'Not found upstream'
+            goto retry
+        end
+        status = stream.status
+        if status == nil then
+            last_err = 'Not found upstream status'
+            goto retry
+        end
+        if status ~= 'follow' then
+            last_err = 'Upstream status is not follow'
+            goto retry
+        end
+        -- Success - the upstream is in 'follow' state.
+        do return end
+    ::retry::
+        -- 'stopped' is terminal - no point in waiting out the deadline.
+        if fiber.clock() > deadline or status == 'stopped' then
+            t.fail(yaml.encode({
+                err = last_err,
+                dst_id = id,
+                replication_info = box.info.replication,
+                replication_cfg = box.cfg.replication,
+            }))
+        end
+        fiber.sleep(0.01)
+    end
+end
+
+--
+-- Wait full synchronization between the given servers: same vclock and mutual
+-- following.
+--
+local function storage_wait_pairsync(s1, s2)
+    s1:wait_vclock_of(s2)
+    s2:wait_vclock_of(s1)
+    -- storage_wait_follow_f runs remotely on each instance via exec().
+    s1:exec(storage_wait_follow_f, {s2:instance_id()})
+    s2:exec(storage_wait_follow_f, {s1:instance_id()})
+end
+
+--
+-- Wait full synchronization between all nodes in each replicaset of the
+-- cluster.
+--
+local function cluster_wait_fullsync(g)
+    local replicasets = {}
+    for _, storage in pairs(cluster_find_all(g)) do
+        local uuid = storage:replicaset_uuid()
+        local replicaset = replicasets[uuid]
+        if not replicaset then
+            replicasets[uuid] = {storage}
+        else
+            table.insert(replicaset, storage)
+        end
+    end
+    for _, replicaset in pairs(replicasets) do
+        for i = 1, #replicaset do
+            local s1 = replicaset[i]
+            for j = i + 1, #replicaset do
+                storage_wait_pairsync(s1, replicaset[j])
+            end
+        end
+    end
+end
+
+--
+-- Stop data node. Wrapped into a one-line function in case in the future would
+-- want to do something more here.
+--
+local function storage_stop(storage)
+    storage:stop()
+end
+
+--
+-- Start a data node + cfg it right away. Usually this is what is really wanted,
+-- not an unconfigured instance.
+--
+local function storage_start(storage, cfg)
+    storage:start()
+    local _, err = storage:exec(function(cfg)
+        return require('vshard.storage').cfg(cfg, box.info.uuid)
+    end, {cfg})
+    t.assert_equals(err, nil, 'storage cfg on start')
+end
+
+--
+-- Disconnect the router from all storages.
+--
+local function router_disconnect(router)
+    router:exec(function()
+        local replicasets = require('vshard.router').static.replicasets
+        for _, rs in pairs(replicasets) do
+            for _, r in pairs(rs.replicas) do
+                local c = r.conn
+                if c then
+                    c:close()
+                end
+            end
+        end
+    end)
+end
+
+-- Drop the instance from the cluster and from the test group.
+local function drop_instance(g, instance)
+    if g.cluster then
+        g.cluster:delete_server(instance.id)
+    end
+
+    instance:drop()
+    g.cluster[instance.alias] = nil
+end
+
+--
+-- Wait until the member of the tab (table) becomes not equal to nil.
+-- As we want to have a 'pointer' to the member and not just the copy
+-- of nil, we pass table and the expected member's name.
+--
+local function wait_for_not_nil(tab, member, opts)
+    opts = opts or {}
+    t.assert_equals(type(tab), 'table')
+    t.helpers.retrying({timeout = opts.timeout or wait_timeout,
+                        delay = busy_step}, function()
+        if tab[member] ~= nil then
+            return
+        end
+
+        if opts.on_yield then
+            opts.on_yield()
+        end
+        error(string.format('timed out: %s is still nil', member))
+    end)
+end
+
+--
+-- Wait until the member of the table becomes equal to nil.
+-- Same as wait_for_not_nil.
+--
+local function wait_for_nil(tab, member, opts)
+    opts = opts or {}
+    t.assert_equals(type(tab), 'table')
+    t.helpers.retrying({timeout = opts.timeout or wait_timeout,
+                        delay = busy_step}, function()
+        if tab[member] == nil then
+            return
+        end
+
+        if opts.on_yield then
+            opts.on_yield()
+        end
+        error(string.format('timed out: %s is still not nil', member))
+    end)
+end
+
+--------------------------------------------------------------------------------
+-- Service info helpers
+--------------------------------------------------------------------------------
+
+--
+-- Wait for the status of the service to be equal to the one the user expects.
+-- The function assumes that the current status is not the one the user wants
+-- to see (requested status must have another status_idx).
+--
+-- Following opts are accepted: opts.timeout and opts.on_yield
+--
+local function service_wait_for_new_status(service, status, opts)
+    opts = opts or {}
+    local first_status_idx = service.status_idx
+    t.helpers.retrying({timeout = opts.timeout or wait_timeout,
+                        delay = busy_step}, function()
+        if first_status_idx ~= service.status_idx and
+           service.status == status then
+            return
+        end
+        if opts.on_yield then
+            opts.on_yield()
+        end
+        error(string.format('waiting for status "%s" timed out: ' ..
+                            'last status is "%s" with status_idx %d',
+                            status, service.status, service.status_idx))
+    end)
+end
+
+--
+-- Wait until the error, passed to the function's argument, occurs.
+-- The error must have new status_idx different from the current one.
+-- NOTE: the `error` parameter shadows the global error() builtin here.
+--
+local function service_wait_for_new_error(service, error, opts)
+    log.info('Waiting for new error "%s" of "%s" service',
+             error, service.name)
+    repeat
+        service_wait_for_new_status(service, 'error', opts)
+    until string.match(service.error, error)
+end
+
+--
+-- Check if the current status is 'error' and the error is the one the
+-- user requested. If not, wait for a new 'error'.
+--
+local function service_wait_for_error(service, error, opts)
+    log.info('Waiting for error "%s" of "%s" service', error, service.name)
+    if service.status == 'error' and string.match(service.error, error) then
+        return
+    end
+    service_wait_for_new_error(service, error, opts)
+end
+
+--
+-- Wait until the new status of the service is 'ok'. The status must
+-- have new status_idx different from the current one.
+--
+local function service_wait_for_new_ok(service, opts)
+    log.info('Waiting for new ok status of "%s" service', service.name)
+    service_wait_for_new_status(service, 'ok', opts)
+end
+
+--
+-- Check if current status is 'ok' and if not, wait for a new 'ok'.
+-- Ignore this status if `status_idx` equals 0 as this one is the default
+-- value and no iteration of background service was done yet.
+--
+local function service_wait_for_ok(service, opts)
+    log.info('Waiting for ok status of "%s" service', service.name)
+    if service.status_idx ~= 0 and service.status == 'ok' then
+        log.info('"%s" already has ok status', service.name)
+        return
+    end
+    service_wait_for_new_ok(service, opts)
+end
+
+--
+-- Wait for activity. Passed value can be the substring of the
+-- actual activity.
+--
+local function service_wait_for_activity(service, activity, opts)
+    opts = opts or {}
+    log.info('Waiting for activity "%s" of "%s" service ',
+             activity, service.name)
+    t.helpers.retrying({timeout = opts.timeout or wait_timeout,
+                        delay = busy_step}, function()
+        if string.match(service.activity, activity) then
+            return
+        end
+        if opts.on_yield then
+            opts.on_yield()
+        end
+        error(string.format('waiting for activity "%s" timed out: '..
+                            'last activity is %s', activity,
+                            service.activity))
+    end)
+end
+
+-- Project source directory - the parent of this script's own directory.
+local sourcedir = fio.dirname(debug.sourcedir())
+
+-- Git directory of the project and data directory of the test.
+-- Used in evolution tests to fetch old versions of vshard.
+--local sourcedir = fio.abspath(os.getenv('PACKPACK_GIT_SOURCEDIR') or
+--                              os.getenv('SOURCEDIR'))
+--if not sourcedir then
+--    local script_path = debug.getinfo(1).source:match("@?(.*/)")
+--    script_path = fio.abspath(script_path)
+--    sourcedir = fio.abspath(script_path .. '/../../../')
+--end
+
+-- May be nil, if VARDIR is not specified.
+-- local vardir = fio.abspath(os.getenv('VARDIR'))
+
+-- Public API of the test helper module.
+return {
+    error_is_timeout = error_is_timeout,
+    config_new = config_new,
+    cluster_new = cluster_new,
+    cluster_cfg = cluster_cfg,
+    cluster_for_each = cluster_for_each,
+    cluster_exec_each = cluster_exec_each,
+    cluster_for_each_master = cluster_for_each_master,
+    cluster_exec_each_master = cluster_exec_each_master,
+    cluster_bootstrap = cluster_bootstrap,
+    cluster_rebalancer_disable = cluster_rebalancer_disable,
+    cluster_rebalancer_enable = cluster_rebalancer_enable,
+    cluster_wait_vclock_all = cluster_wait_vclock_all,
+    cluster_wait_fullsync = cluster_wait_fullsync,
+    storage_first_bucket = storage_first_bucket,
+    storage_stop = storage_stop,
+    storage_start = storage_start,
+    router_new = router_new,
+    router_cfg = router_cfg,
+    router_disconnect = router_disconnect,
+    uuid_from_int = uuid_from_int,
+    wait_timeout = wait_timeout,
+    busy_step = busy_step,
+    drop_instance = drop_instance,
+    service_wait_for_ok = service_wait_for_ok,
+    service_wait_for_new_ok = service_wait_for_new_ok,
+    service_wait_for_error = service_wait_for_error,
+    service_wait_for_new_error = service_wait_for_new_error,
+    service_wait_for_activity = service_wait_for_activity,
+    wait_for_not_nil = wait_for_not_nil,
+    wait_for_nil = wait_for_nil,
+    sourcedir = sourcedir,
+    --vardir = vardir,
+}