From 5cb1df1dc225742db184133bc7107419cefe0ddb Mon Sep 17 00:00:00 2001
From: Vladislav Shpilevoy
Date: Mon, 11 Sep 2023 18:51:15 +0200
Subject: [PATCH] test: extract service_info into a separate file

It takes too long (> 30 seconds). The commit moves it to a separate
file so that the more generic test storage_1_1 doesn't become too
slow. The latter is going to get new test cases, and they are easier
to debug when the info test is in another place.

Needed for #429

NO_DOC=refactoring
---
 test/storage-luatest/service_info_test.lua | 129 +++++++++++++++++++++
 test/storage-luatest/storage_1_1_test.lua  |  79 -------------
 2 files changed, 129 insertions(+), 79 deletions(-)
 create mode 100644 test/storage-luatest/service_info_test.lua

diff --git a/test/storage-luatest/service_info_test.lua b/test/storage-luatest/service_info_test.lua
new file mode 100644
index 00000000..ac2ac34a
--- /dev/null
+++ b/test/storage-luatest/service_info_test.lua
@@ -0,0 +1,129 @@
+local t = require('luatest')
+local vtest = require('test.luatest_helpers.vtest')
+local vutil = require('vshard.util')
+
+local group_config = {{engine = 'memtx'}, {engine = 'vinyl'}}
+
+if vutil.feature.memtx_mvcc then
+    table.insert(group_config, {
+        engine = 'memtx', memtx_use_mvcc_engine = true
+    })
+    table.insert(group_config, {
+        engine = 'vinyl', memtx_use_mvcc_engine = true
+    })
+end
+
+local test_group = t.group('storage', group_config)
+
+local cfg_template = {
+    sharding = {
+        {
+            replicas = {
+                replica_1_a = {
+                    master = true,
+                },
+            },
+        },
+        {
+            replicas = {
+                replica_2_a = {
+                    master = true,
+                },
+            },
+        },
+    },
+    bucket_count = 10
+}
+local global_cfg
+
+test_group.before_all(function(g)
+    cfg_template.memtx_use_mvcc_engine = g.params.memtx_use_mvcc_engine
+    global_cfg = vtest.config_new(cfg_template)
+
+    vtest.cluster_new(g, global_cfg)
+    vtest.cluster_bootstrap(g, global_cfg)
+    vtest.cluster_rebalancer_disable(g)
+end)
+
+test_group.after_all(function(g)
+    g.cluster:drop()
+end)
+
+--
+-- Test that services for all background fibers are created
+-- and work properly (gh-107).
+--
+test_group.test_basic_storage_service_info = function(g)
+    local uuid = g.replica_1_a:exec(function()
+        -- Test that all services save their states
+        local info = ivshard.storage.info({with_services = true})
+        ilt.assert_not_equals(info.services, nil)
+        ilt.assert_not_equals(info.services.gc, nil)
+        ilt.assert_not_equals(info.services.recovery, nil)
+        ilt.assert_not_equals(info.services.rebalancer, nil)
+        -- The routes applier service is created as soon as it's needed
+        ilt.assert_equals(info.services.routes_applier, nil)
+
+        -- Forbid the routes_applier service from dying
+        local internal = ivshard.storage.internal
+        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = true
+        -- Break the timeout in order to get an error
+        rawset(_G, 'chunk_timeout', ivconst.REBALANCER_CHUNK_TIMEOUT)
+        ivconst.REBALANCER_CHUNK_TIMEOUT = 1e-6
+        return ivutil.replicaset_uuid()
+    end)
+
+    g.replica_2_a:exec(function(uuid)
+        -- Send a bucket to create an imbalance in
+        -- order to test the routes applier service
+        local bid = _G.get_first_bucket()
+        local ok, err = ivshard.storage.bucket_send(bid, uuid)
+        ilt.assert_equals(err, nil)
+        ilt.assert(ok)
+    end, {uuid})
+
+    vtest.cluster_rebalancer_enable(g)
+
+    g.replica_1_a:exec(function()
+        local internal = ivshard.storage.internal
+        local applier_name = 'routes_applier_service'
+        ivtest.wait_for_not_nil(internal, applier_name)
+        local service = internal[applier_name]
+        ivtest.service_wait_for_error(service, 'Timed?[Oo]ut')
+
+        -- Restore everything
+        ivconst.REBALANCER_CHUNK_TIMEOUT = _G.chunk_timeout
+        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = false
+        ivtest.wait_for_nil(internal, applier_name)
+        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = true
+
+        -- All buckets must be recovered to the ACTIVE state,
+        -- otherwise the rebalancer won't work.
+        ivshard.storage.recovery_wakeup()
+        service = ivshard.storage.internal.recovery_service
+        ivtest.service_wait_for_new_ok(service)
+    end)
+
+    g.replica_2_a:exec(function()
+        ivshard.storage.recovery_wakeup()
+        local service = ivshard.storage.internal.recovery_service
+        ivtest.service_wait_for_new_ok(service)
+    end)
+
+    g.replica_1_a:exec(function()
+        local internal = ivshard.storage.internal
+        local applier_name = 'routes_applier_service'
+        ivtest.wait_for_not_nil(internal, applier_name,
+                                {on_yield = ivshard.storage.rebalancer_wakeup})
+
+        -- Everything is all right now
+        ivtest.service_wait_for_ok(internal[applier_name])
+        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = false
+
+        ivtest.service_wait_for_ok(internal.rebalancer_service,
+                                   {on_yield = ivshard.storage.rebalancer_wakeup})
+    end)
+
+    -- Cleanup
+    vtest.cluster_rebalancer_disable(g)
+end
diff --git a/test/storage-luatest/storage_1_1_test.lua b/test/storage-luatest/storage_1_1_test.lua
index 53ebbc95..740e635c 100644
--- a/test/storage-luatest/storage_1_1_test.lua
+++ b/test/storage-luatest/storage_1_1_test.lua
@@ -261,82 +261,3 @@ test_group.test_on_bucket_event = function(g)
         box.space.data2:drop()
     end)
 end
-
---
--- Test that services for all background fibers are created
--- and work properly (gh-107).
---
-test_group.test_basic_storage_service_info = function(g)
-    local uuid = g.replica_1_a:exec(function()
-        -- Test that all services save states
-        local info = ivshard.storage.info({with_services = true})
-        ilt.assert_not_equals(info.services, nil)
-        ilt.assert_not_equals(info.services.gc, nil)
-        ilt.assert_not_equals(info.services.recovery, nil)
-        ilt.assert_not_equals(info.services.rebalancer, nil)
-        -- Routes applier service is created as soon as it's needed
-        ilt.assert_equals(info.services.routes_applier, nil)
-
-        -- Forbid routes_apply service to die
-        local internal = ivshard.storage.internal
-        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = true
-        -- Break timeout in order to get error
-        rawset(_G, 'chunk_timeout', ivconst.REBALANCER_CHUNK_TIMEOUT)
-        ivconst.REBALANCER_CHUNK_TIMEOUT = 1e-6
-        return ivutil.replicaset_uuid()
-    end)
-
-    g.replica_2_a:exec(function(uuid)
-        -- Send bucket to create disbalance in
-        -- order to test routes applier service
-        local bid = _G.get_first_bucket()
-        local ok, err = ivshard.storage.bucket_send(bid, uuid)
-        ilt.assert_equals(err, nil)
-        ilt.assert(ok)
-    end, {uuid})
-
-    vtest.cluster_rebalancer_enable(g)
-
-    g.replica_1_a:exec(function()
-        local internal = ivshard.storage.internal
-        local applier_name = 'routes_applier_service'
-        ivtest.wait_for_not_nil(internal, applier_name)
-        local service = internal[applier_name]
-        ivtest.service_wait_for_error(service, 'Timed?[Oo]ut')
-
-        -- Restore everything
-        ivconst.REBALANCER_CHUNK_TIMEOUT = _G.chunk_timeout
-        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = false
-        ivtest.wait_for_nil(internal, applier_name)
-        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = true
-
-        -- All buckets must be recovered to the ACTIVE state,
-        -- otherwise rebalancer won't work.
-        ivshard.storage.recovery_wakeup()
-        service = ivshard.storage.internal.recovery_service
-        ivtest.service_wait_for_new_ok(service)
-    end)
-
-    g.replica_2_a:exec(function()
-        ivshard.storage.recovery_wakeup()
-        local service = ivshard.storage.internal.recovery_service
-        ivtest.service_wait_for_new_ok(service)
-    end)
-
-    g.replica_1_a:exec(function()
-        local internal = ivshard.storage.internal
-        local applier_name = 'routes_applier_service'
-        ivtest.wait_for_not_nil(internal, applier_name,
-                                {on_yield = ivshard.storage.rebalancer_wakeup})
-
-        -- Everything is all right now
-        ivtest.service_wait_for_ok(internal[applier_name])
-        internal.errinj.ERRINJ_APPLY_ROUTES_STOP_DELAY = false
-
-        ivtest.service_wait_for_ok(internal.rebalancer_service,
-                                   {on_yield = ivshard.storage.rebalancer_wakeup})
-    end)
-
-    -- Cleanup
-    vtest.cluster_rebalancer_disable(g)
-end
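
As an aside, not part of the patch: a minimal sketch of how the service
states this test asserts on could be inspected by hand on a storage node.
The with_services option and the gc/recovery/rebalancer service names come
from the test above; treat the exact per-service layout as an assumption.

local vshard = require('vshard')
local json = require('json')

-- Fetch storage info together with the background service states.
local info = vshard.storage.info({with_services = true})
for name, service in pairs(info.services or {}) do
    -- Dump each service's reported state for a quick health overview.
    print(name, json.encode(service))
end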