Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PBM. Add tests for selective backup/restore and PBM-1391/PBM-1344, fix test names for zephyr #250

Merged
merged 1 commit into from
Dec 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/PBM-FULL.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ on:
jobs:
test:
runs-on: ubuntu-latest
timeout-minutes: 120
timeout-minutes: 180
strategy:
fail-fast: false
matrix:
Expand Down
9 changes: 9 additions & 0 deletions pbm-functional/pytest/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -490,10 +490,19 @@ def destroy(self,**kwargs):
print("\n")
cleanup=kwargs.get('cleanup_backups', False)
if cleanup:
timeout = time.time() + 30
self.disable_pitr()
result=self.exec_pbm_cli("delete-pitr --all --force --yes ")
Cluster.log(result.stdout + result.stderr)
while True:
if not self.get_status()['running'] or time.time() > timeout:
break
result=self.exec_pbm_cli("delete-backup --older-than=0d --force --yes")
Cluster.log(result.stdout + result.stderr)
while True:
if not self.get_status()['running'] or time.time() > timeout:
break

for host in self.all_hosts:
try:
container = docker.from_env().containers.get(host)
Expand Down
2 changes: 1 addition & 1 deletion pbm-functional/pytest/test_PBM-1252.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def start_cluster(cluster,request):
cluster.destroy(cleanup_backups=True)

@pytest.mark.timeout(3600,func_only=True)
def test_load(start_cluster,cluster):
def test_load_PBM_T250(start_cluster,cluster):
cluster.check_pbm_status()
indexes = []

Expand Down
116 changes: 116 additions & 0 deletions pbm-functional/pytest/test_PBM-1344.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
import pytest
import pymongo
import bson
import testinfra
import time
import os
import docker
import threading

from datetime import datetime
from cluster import Cluster

# Canonical sample payload shared by this test suite's modules.
# NOTE(review): unused in this module — the tests below insert {"doc": i}
# documents instead; kept for consistency with the sibling test files.
documents = [{"a": 1}, {"b": 2}, {"c": 3}, {"d": 4}]


@pytest.fixture(scope="package")
def docker_client():
    """Package-scoped Docker API client configured from the environment."""
    api = docker.from_env()
    return api


@pytest.fixture(scope="package")
def config():
    """Topology description: a single three-member replica set ``rs1``."""
    hosts = ("rs101", "rs102", "rs103")
    return {
        "_id": "rs1",
        "members": [{"host": h} for h in hosts],
    }


@pytest.fixture(scope="package")
def cluster(config):
    """Package-scoped Cluster helper wrapping the replica-set topology."""
    rs_cluster = Cluster(config)
    return rs_cluster


@pytest.fixture(scope="function")
def start_cluster(cluster, request):
    """Build a fresh replica set with PBM configured and empty backup storage.

    Yields once setup is complete; teardown collects logs when pytest runs
    with --verbose and destroys the cluster including stored backups.
    """
    try:
        # Start from a clean slate even if a previous test leaked containers.
        cluster.destroy()
        cluster.create()
        cluster.setup_pbm()
        # Make the shared backup directory writable and wipe leftovers so
        # earlier runs cannot influence backup listings in this test.
        os.chmod("/backups", 0o777)
        os.system("rm -rf /backups/*")
        yield True
    finally:
        if request.config.getoption("--verbose"):
            cluster.get_logs()
        cluster.destroy(cleanup_backups=True)


@pytest.mark.timeout(600, func_only=True)
def test_physical_PBM_T279(start_cluster, cluster):
    """PITR physical restore still works after storage cleanup and rebuild.

    Phase 1: take a physical backup with PITR slices, then delete the backup
    and destroy the cluster.  Phase 2: recreate the cluster, take a new
    backup with PITR enabled, and restore to a point in time, asserting all
    documents written before that point are present.
    """
    cluster.check_pbm_status()
    # Reuse one connection for the whole phase instead of constructing a new
    # MongoClient on every loop iteration (the original `client` was unused).
    client = pymongo.MongoClient(cluster.connection)
    backup = cluster.make_backup("physical")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
    for i in range(10):
        client["test"]["test"].insert_one({"doc": i})
    cluster.disable_pitr()
    time.sleep(10)
    cluster.delete_backup(backup)
    cluster.destroy()

    cluster.create()
    cluster.setup_pbm()
    time.sleep(10)
    cluster.check_pbm_status()
    # Reconnect: the topology the previous client pointed at was destroyed.
    client = pymongo.MongoClient(cluster.connection)
    cluster.make_backup("physical")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
    for i in range(10):
        client["test"]["test"].insert_one({"doc": i})
    time.sleep(5)
    pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
    Cluster.log("Time for PITR is: " + pitr)
    # Keep the CLI flag in its own variable instead of clobbering `backup`.
    restore_opts = " --time=" + pitr
    cluster.disable_pitr()
    time.sleep(10)
    cluster.make_restore(restore_opts, restart_cluster=True, check_pbm_status=True)
    assert (
        pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({})
        == 10
    )
    Cluster.log("Finished successfully")


@pytest.mark.timeout(300, func_only=True)
def test_logical_PBM_T280(start_cluster, cluster):
    """PITR logical restore still works after storage cleanup and rebuild.

    Same scenario as test_physical_PBM_T279 but with logical backups and a
    restore that does not require a cluster restart.
    """
    cluster.check_pbm_status()
    # Reuse one connection for the whole phase instead of constructing a new
    # MongoClient on every loop iteration (the original `client` was unused).
    client = pymongo.MongoClient(cluster.connection)
    backup = cluster.make_backup("logical")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
    for i in range(10):
        client["test"]["test"].insert_one({"doc": i})
    cluster.disable_pitr()
    time.sleep(10)
    cluster.delete_backup(backup)
    cluster.destroy()

    cluster.create()
    cluster.setup_pbm()
    time.sleep(10)
    cluster.check_pbm_status()
    # Reconnect: the topology the previous client pointed at was destroyed.
    client = pymongo.MongoClient(cluster.connection)
    cluster.make_backup("logical")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
    for i in range(10):
        client["test"]["test"].insert_one({"doc": i})
    time.sleep(5)
    pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
    Cluster.log("Time for PITR is: " + pitr)
    # Keep the CLI flag in its own variable instead of clobbering `backup`.
    restore_opts = " --time=" + pitr
    cluster.disable_pitr()
    time.sleep(10)
    cluster.make_restore(restore_opts, check_pbm_status=True)
    assert (
        pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({})
        == 10
    )
    Cluster.log("Finished successfully")
87 changes: 87 additions & 0 deletions pbm-functional/pytest/test_PBM-1391.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import pytest
import pymongo
import bson
import testinfra
import time
import os
import docker
import threading

from datetime import datetime
from cluster import Cluster

# Canonical sample payload inserted into the sharded collection below;
# len(documents) is used in the post-restore count assertion.
documents = [{"a": 1}, {"b": 2}, {"c": 3}, {"d": 4}]


@pytest.fixture(scope="package")
def docker_client():
    """Package-scoped Docker API client configured from the environment."""
    api = docker.from_env()
    return api


@pytest.fixture(scope="package")
def config():
    """Sharded topology: mongos, a 3-node config server RS and two 3-node shards."""

    def replset(rs_id, hosts):
        # Build one replica-set description from its member host names.
        return {"_id": rs_id, "members": [{"host": h} for h in hosts]}

    return {
        "mongos": "mongos",
        "configserver": replset("rscfg", ["rscfg01", "rscfg02", "rscfg03"]),
        "shards": [
            replset("rs1", ["rs101", "rs102", "rs103"]),
            replset("rs2", ["rs201", "rs202", "rs203"]),
        ],
    }


@pytest.fixture(scope="package")
def cluster(config):
    """Package-scoped Cluster helper wrapping the sharded topology."""
    sharded_cluster = Cluster(config)
    return sharded_cluster


@pytest.fixture(scope="function")
def start_cluster(cluster, request):
    """Build a fresh sharded cluster with PBM and empty backup storage.

    Yields once setup is complete; teardown collects logs when pytest runs
    with --verbose and destroys the cluster including stored backups.
    """
    try:
        # Start from a clean slate even if a previous test leaked containers.
        cluster.destroy()
        # Make the shared backup directory writable and wipe leftovers so
        # earlier runs cannot influence backup listings in this test.
        os.chmod("/backups", 0o777)
        os.system("rm -rf /backups/*")
        cluster.create()
        cluster.setup_pbm()
        # Removed an unused `client = pymongo.MongoClient(...)` local:
        # MongoClient connects lazily, so the call had no effect at all.
        yield True
    finally:
        if request.config.getoption("--verbose"):
            cluster.get_logs()
        cluster.destroy(cleanup_backups=True)


@pytest.mark.timeout(600, func_only=True)
def test_physical_PBM_T278(start_cluster, cluster):
    """PITR physical restore of a hashed-sharded collection.

    Takes a physical base backup, writes documents under PITR, drops the
    database, restores to the recorded point in time and verifies both the
    document count and that the collection is still sharded.
    """
    cluster.check_pbm_status()
    connection = pymongo.MongoClient(cluster.connection)
    connection.admin.command("enableSharding", "test")
    connection.admin.command("shardCollection", "test.test", key={"_id": "hashed"})
    cluster.make_backup("physical")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
    time.sleep(5)
    connection["test"]["test"].insert_many(documents)
    time.sleep(5)
    recovery_point = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
    Cluster.log("Time for PITR is: " + recovery_point)
    cluster.disable_pitr()
    time.sleep(5)
    connection.drop_database("test")
    restore_args = " --time=" + recovery_point
    cluster.make_restore(restore_args, restart_cluster=True, check_pbm_status=True)
    cluster.make_backup("physical")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
    time.sleep(10)
    assert connection["test"]["test"].count_documents({}) == len(documents)
    assert connection["test"].command("collstats", "test").get("sharded", False)
    Cluster.log("Finished successfully")
4 changes: 2 additions & 2 deletions pbm-functional/pytest/test_directoryperdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def start_cluster(cluster,request):
cluster.destroy(cleanup_backups=True)

@pytest.mark.timeout(300,func_only=True)
def test_physical(start_cluster,cluster):
def test_physical_PBM_T220(start_cluster,cluster):
cluster.check_pbm_status()
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
backup=cluster.make_backup("physical")
Expand All @@ -47,7 +47,7 @@ def test_physical(start_cluster,cluster):
Cluster.log("Finished successfully")

@pytest.mark.timeout(300,func_only=True)
def test_incremental(start_cluster,cluster):
def test_incremental_PBM_T219(start_cluster,cluster):
cluster.check_pbm_status()
cluster.make_backup("incremental --base")
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
Expand Down
4 changes: 2 additions & 2 deletions pbm-functional/pytest/test_encryption.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def start_cluster(cluster,request):
cluster.destroy(cleanup_backups=True)

@pytest.mark.timeout(300,func_only=True)
def test_physical(start_cluster,cluster):
def test_physical_PBM_T223(start_cluster,cluster):
cluster.check_pbm_status()
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
backup=cluster.make_backup("physical")
Expand All @@ -47,7 +47,7 @@ def test_physical(start_cluster,cluster):
print("Finished successfully")

@pytest.mark.timeout(300,func_only=True)
def test_incremental(start_cluster,cluster):
def test_incremental_PBM_T222(start_cluster,cluster):
cluster.check_pbm_status()
cluster.make_backup("incremental --base")
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
Expand Down
77 changes: 67 additions & 10 deletions pbm-functional/pytest/test_replicaset.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,23 +46,80 @@ def test_logical(start_cluster,cluster):
cluster.check_pbm_status()
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_many(documents)
pymongo.MongoClient(cluster.connection)["test2"]["test21"].insert_many(documents)
pymongo.MongoClient(cluster.connection)["test2"]["test22"].insert_many(documents)
backup_partial=cluster.make_backup("logical --ns=test.test,test2.*")
backup_full=cluster.make_backup("logical")
pymongo.MongoClient(cluster.connection).drop_database('test')
pymongo.MongoClient(cluster.connection).drop_database('test2')
cluster.make_restore(backup_partial,check_pbm_status=True)
assert pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({}) == len(documents)
assert pymongo.MongoClient(cluster.connection)["test"]["test1"].count_documents({}) == 0
assert pymongo.MongoClient(cluster.connection)["test2"]["test21"].count_documents({}) == len(documents)
assert pymongo.MongoClient(cluster.connection)["test2"]["test22"].count_documents({}) == len(documents)
pymongo.MongoClient(cluster.connection).drop_database('test')
cluster.make_restore(backup_full,check_pbm_status=True)
assert pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({}) == len(documents)
assert pymongo.MongoClient(cluster.connection)["test"]["test1"].count_documents({}) == len(documents)
Cluster.log("Finished successfully")


@pytest.mark.timeout(300, func_only=True)
def test_logical_selective_PBM_T274(start_cluster, cluster):
    """Selective (namespace-filtered) logical restore with PITR.

    Verifies that restoring --ns=test1.test_coll11,test2.* from either a
    partial base snapshot or a full base snapshot plus oplog reproduces the
    post-backup state: the rewritten documents and new index in
    test1.test_coll11, and the original data plus new index in test2.*.
    """
    cluster.check_pbm_status()
    client = pymongo.MongoClient(cluster.connection)
    for i in range(10):
        client["test1"]["test_coll11"].insert_one({"key": i, "data": i})
        client["test2"]["test_coll21"].insert_one({"key": i, "data": i})
        client["test2"]["test_coll22"].insert_one({"key": i, "data": i})
    client["test1"]["test_coll11"].create_index(["key"], name="test_coll11_index_old")
    client["test2"]["test_coll21"].create_index(["key"], name="test_coll21_index_old")
    backup_full = cluster.make_backup("logical")
    backup_partial = cluster.make_backup("logical --ns=test1.test_coll11,test2.*")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
    time.sleep(5)
    # Rewrite test1.test_coll11 after the snapshots: drop it, insert shifted
    # keys and add a new index; also add a new index on test2.test_coll22.
    client.drop_database("test1")
    for i in range(10):
        client["test1"]["test_coll11"].insert_one({"key": i + 10, "data": i + 10})
    client["test1"]["test_coll11"].create_index("data", name="test_coll11_index_new")
    client["test2"]["test_coll22"].create_index("data", name="test_coll22_index_new")
    time.sleep(10)
    pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
    # Log the bare timestamp before turning it into the CLI flag (the
    # original logged the string with the " --time=" prefix already applied).
    Cluster.log("Time for PITR is: " + pitr)
    pitr = " --time=" + pitr
    cluster.disable_pitr()
    time.sleep(10)
    client.drop_database("test1")
    client.drop_database("test2")
    backup_partial = " --base-snapshot=" + backup_partial + pitr
    backup_full = (
        " --base-snapshot=" + backup_full + pitr + " --ns=test1.test_coll11,test2.*"
    )

    def check_restored_state():
        # Shared post-restore assertions: both the partial-snapshot and the
        # full-snapshot selective restores must yield exactly this state.
        assert client["test1"]["test_coll11"].count_documents({}) == 10
        assert client["test2"]["test_coll21"].count_documents({}) == 10
        assert client["test2"]["test_coll22"].count_documents({}) == 10
        for i in range(10):
            assert client["test1"]["test_coll11"].find_one(
                {"key": i + 10, "data": i + 10}
            )
            assert client["test2"]["test_coll21"].find_one({"key": i, "data": i})
            assert client["test2"]["test_coll22"].find_one({"key": i, "data": i})
        # The pre-PITR index on test_coll11 must be gone; the post-backup
        # indexes must have been replayed from the oplog.
        assert (
            "test_coll11_index_old"
            not in client["test1"]["test_coll11"].index_information()
        )
        assert (
            "test_coll11_index_new" in client["test1"]["test_coll11"].index_information()
        )
        assert (
            "test_coll21_index_old" in client["test2"]["test_coll21"].index_information()
        )
        assert (
            "test_coll22_index_new" in client["test2"]["test_coll22"].index_information()
        )

    cluster.make_restore(backup_partial, check_pbm_status=True)
    check_restored_state()
    client.drop_database("test1")
    client.drop_database("test2")
    cluster.make_restore(backup_full, check_pbm_status=True)
    check_restored_state()
    Cluster.log("Finished successfully")


@pytest.mark.timeout(300,func_only=True)
def test_physical(start_cluster,cluster):
cluster.check_pbm_status()
Expand Down
Loading