Skip to content

Commit

Permalink
fix(tests): check cluster big snapshot in unit test (#4403)
Browse files Browse the repository at this point in the history
fix(tests): check cluster big snapshot in unit test

Signed-off-by: adi_holden <[email protected]>
  • Loading branch information
adiholden authored Jan 5, 2025
1 parent 4f09fe0 commit 92c3749
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 59 deletions.
12 changes: 12 additions & 0 deletions src/server/cluster/cluster_family_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -619,6 +619,18 @@ TEST_F(ClusterFamilyTest, ClusterFirstConfigCallDropsEntriesNotOwnedByNode) {
ExpectConditionWithinTimeout([&]() { return CheckedInt({"dbsize"}) == 0; });
}

// Regression test: loading a snapshot that is larger than the configured max
// memory must still succeed for a cluster node (it may own only a subset of
// the slots in the snapshot, so an over-limit file is not an error by itself).
TEST_F(ClusterFamilyTest, SnapshotBiggerThanMaxMemory) {
  InitWithDbFilename();
  ConfigSingleNodeCluster(GetMyId());

  // Create a dataset and persist it while memory is unconstrained.
  Run({"debug", "populate", "50000"});
  EXPECT_EQ(Run({"save", "df"}), "OK");

  // Shrink the memory limit well below the populated dataset size, then load
  // the snapshot we just wrote; the load is still expected to report OK.
  // NOTE(review): 10000 is presumably bytes — confirm the unit of
  // max_memory_limit against its declaration.
  max_memory_limit = 10000;
  auto save_info = service_->server_family().GetLastSaveInfo();
  EXPECT_EQ(Run({"dfly", "load", save_info.file_name}), "OK");
}

TEST_F(ClusterFamilyTest, Keyslot) {
// Example from Redis' command reference: https://redis.io/commands/cluster-keyslot/
EXPECT_THAT(Run({"cluster", "keyslot", "somekey"}), IntArg(11'058));
Expand Down
59 changes: 0 additions & 59 deletions tests/dragonfly/cluster_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -2499,62 +2499,3 @@ async def test_migration_timeout_on_sync(df_factory: DflyInstanceFactory, df_see
await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])

assert (await StaticSeeder.capture(nodes[1].client)) == start_capture


@pytest.mark.slow
@dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})
async def test_snapshot_bigger_than_maxmemory(df_factory: DflyInstanceFactory, df_seeder_factory):
    """
    Load a snapshot that is bigger than maxmemory; because its slots are split
    across two instances after the save, each instance should load its share
    without running out of memory:
    1) Create a snapshot while one instance owns all slots.
    2) Split the slots between 2 instances and reduce maxmemory.
    3) Load the snapshot on both instances.
    The result should match the original: together the instances contain all
    the data that was in the snapshot.
    """
    # Shared dbfilename so the second instance can find the file written by
    # the first one when it runs DFLY LOAD below.
    dbfilename = f"dump_{tmp_file_name()}"
    instances = [
        df_factory.create(
            port=next(next_port), admin_port=next(next_port), maxmemory="3G", dbfilename=dbfilename
        ),
        df_factory.create(port=next(next_port), admin_port=next(next_port), maxmemory="1G"),
    ]
    df_factory.start_all(instances)

    nodes = [await create_node_info(n) for n in instances]

    # Initially node 0 owns the full slot range so the whole dataset (and the
    # snapshot) lives on a single instance.
    nodes[0].slots = [(0, 16383)]
    nodes[1].slots = []

    logging.debug("Push initial config")
    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])

    logging.debug("create data")
    # ~30k keys * ~10KB values — large enough to exceed the 1G maxmemory set
    # below, which is the condition this test exercises.
    seeder = df_seeder_factory.create(
        keys=30000, val_size=10000, port=nodes[0].instance.port, cluster_mode=True
    )
    await seeder.run(target_deviation=0.05)
    # Capture the dataset fingerprint for the post-load comparison.
    capture = await seeder.capture()

    logging.debug("SAVE")
    await nodes[0].client.execute_command("SAVE", "rdb")

    logging.debug("flushall")
    # Drop the data and lower maxmemory on both nodes so the snapshot written
    # above is now bigger than either node's memory limit.
    for node in nodes:
        await node.client.execute_command("flushall")
        await node.client.execute_command("CONFIG SET maxmemory 1G")

    # Re-split ownership: each node now holds half the slot range, so each
    # should keep only its half of the snapshot's keys on load.
    nodes[0].slots = [(0, 8191)]
    nodes[1].slots = [(8192, 16383)]

    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])

    for node in nodes:
        await node.client.execute_command("DFLY", "LOAD", f"{dbfilename}.rdb")

    # The cluster as a whole must contain exactly the captured dataset.
    assert await seeder.compare(capture, nodes[0].instance.port)

    # prevent saving during shutdown
    for node in nodes:
        await node.client.execute_command("flushall")

0 comments on commit 92c3749

Please sign in to comment.