PBM. Add test to check user restoration during PITR restore from selective / full backup
sandraromanchenko committed Dec 10, 2024
1 parent 657db21 commit d01d0bc
Showing 2 changed files with 162 additions and 0 deletions.
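For context, the new test exercises four restore variants built from the same point-in-time target: a PITR restore based on a selective backup, selective restores based on a full backup with and without --with-users-and-roles, and a plain full restore. A rough sketch of the command strings it assembles (a hedged illustration; the snapshot names and timestamp below are placeholders, not real backups):

    backup_full = "2024-12-11T00:24:59Z"      # placeholder name of a full logical backup
    backup_partial = "2024-12-11T00:25:30Z"   # placeholder name of a --ns=test_db1.*,test_db2.* backup
    pitr = " --time=2024-12-11T00:26:07"      # placeholder point-in-time target

    restore_commands = {
        'part_bck': " --base-snapshot=" + backup_partial + pitr,
        'full_bck_part_rst_wo_user': " --base-snapshot=" + backup_full + pitr + " --ns=test_db1.*,test_db2.*",
        'full_bck_part_rst_user': " --base-snapshot=" + backup_full + pitr + " --ns=test_db1.*,test_db2.* --with-users-and-roles",
        'full_bck': " --base-snapshot=" + backup_full + pitr
    }
    # each value is appended to "pbm restore ... --wait" by the cluster helper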
8 changes: 8 additions & 0 deletions pbm-functional/pytest/cluster.py
@@ -625,6 +625,12 @@ def setup_authorization(host,uri):
'{"db":"admin","role":"clusterMonitor" },' +
'{"db":"admin","role":"restore" },' +
'{"db":"admin","role":"pbmAnyAction" }]});\'')
init_pbm_t_user = ('\'db.getSiblingDB("admin").createUser({user:"pbm_test",pwd:"pbmpass_test1","roles":[' +
'{"db":"admin","role":"readWrite","collection":""},' +
'{"db":"admin","role":"backup" },' +
'{"db":"admin","role":"clusterMonitor" },' +
'{"db":"admin","role":"restore" },' +
'{"db":"admin","role":"pbmAnyAction" }]});\'')
x509_pbm_user = ('\'db.getSiblingDB("$external").runCommand({createUser:"[email protected],CN=pbm,OU=client,O=Percona,L=SanFrancisco,ST=California,C=US","roles":[' +
'{"db":"admin","role":"readWrite","collection":""},' +
'{"db":"admin","role":"backup" },' +
@@ -645,6 +651,8 @@ def setup_authorization(host,uri):
'{"db":"admin","role":"pbmAnyAction" }]});\'')
logs = primary.check_output(
"mongo -u root -p root --quiet --eval " + init_pbm_user)
logs = primary.check_output(
"mongo -u root -p root --quiet --eval " + init_pbm_t_user)
#Cluster.log(logs)
if "authMechanism=MONGODB-X509" in uri:
logs = primary.check_output(
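The hunk above provisions a second PBM user, pbm_test, which the new test uses for its alternative connection URI. A minimal sketch of how the user could be verified from PyMongo after setup (hedged; the host and root credentials are illustrative and mirror the fixtures used elsewhere in this suite):

    import pymongo

    # Illustrative connection; the suite's setup runs as root:root against the config server primary.
    client = pymongo.MongoClient("mongodb://root:root@rscfg01:27017/?authSource=admin")
    info = client.admin.command({"usersInfo": {"user": "pbm_test", "db": "admin"}})
    roles = {r["role"] for r in info["users"][0]["roles"]}
    assert {"readWrite", "backup", "clusterMonitor", "restore", "pbmAnyAction"} <= roles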
154 changes: 154 additions & 0 deletions pbm-functional/pytest/test_user_roles.py
@@ -0,0 +1,154 @@
import pytest
import pymongo
import bson
import testinfra
import time
import os
import docker
import threading

from datetime import datetime
from cluster import Cluster


@pytest.fixture(scope="package")
def docker_client():
    return docker.from_env()


@pytest.fixture(scope="package")
def config():
    return {
        "mongos": "mongos",
        "configserver": {
            "_id": "rscfg",
            "members": [{"host": "rscfg01"}, {"host": "rscfg02"}, {"host": "rscfg03"}],
        },
        "shards": [
            {
                "_id": "rs1",
                "members": [{"host": "rs101"}, {"host": "rs102"}, {"host": "rs103"}],
            },
            {
                "_id": "rs2",
                "members": [{"host": "rs201"}, {"host": "rs202"}, {"host": "rs203"}],
            },
        ],
    }

@pytest.fixture(scope="package")
def pbm_mongodb_uri():
    return 'mongodb://pbm_test:[email protected]:27017/?authSource=admin'

@pytest.fixture(scope="package")
def newcluster(config, pbm_mongodb_uri):
    return Cluster(config, pbm_mongodb_uri=pbm_mongodb_uri)

@pytest.fixture(scope="package")
def cluster(config):
    return Cluster(config)


@pytest.fixture(scope="function")
def start_cluster(cluster, newcluster, request):
try:
cluster.destroy()
newcluster.destroy()
os.chmod("/backups", 0o777)
os.system("rm -rf /backups/*")
cluster.create()
cluster.setup_pbm()
yield True

finally:
if request.config.getoption("--verbose"):
cluster.get_logs()
try:
cluster.destroy(cleanup_backups=True)
except Exception as e:
newcluster.destroy(cleanup_backups=True)


def check_user(client, db_name, username, expected_roles):
    try:
        db_query = client.db.command({"usersInfo": {"user": username, "db": db_name}})
        if db_query.get("ok") == 1 and len(db_query.get("users", [])) > 0:
            roles = {role['role'] for role in db_query['users'][0]['roles']}
            return roles == expected_roles
        else:
            return False
    except pymongo.errors.OperationFailure as e:
        return False

@pytest.mark.parametrize('restore_type',['part_bck','full_bck_part_rst_wo_user','full_bck_part_rst_user','full_bck'])
@pytest.mark.timeout(600, func_only=True)
def test_logical_PBM_T216(start_cluster, cluster, newcluster, restore_type):
    cluster.check_pbm_status()
    client = pymongo.MongoClient(cluster.connection)
    client.admin.command("enableSharding", "test_db1")
    client.admin.command("shardCollection", "test_db1.test_coll11", key={"_id": "hashed"})
    client.admin.command('updateUser', 'pbm_test', pwd='pbmpass_test2')
    client.admin.command('createUser', 'admin_random_user1', pwd='test123', roles=[{'role':'readWrite','db':'admin'}, 'userAdminAnyDatabase', 'clusterAdmin'])
    client.test_db1.command('createUser', 'test_random_user1', pwd='test123', roles=[{'role':'readWrite','db':'test_db1'}, {'role':'clusterManager','db':'admin'}])
    for i in range(10):
        client["test_db1"]["test_coll11"].insert_one({"key": i, "data": i})
        client["test_db2"]["test_coll21"].insert_one({"key": i, "data": i})
    backup_full = cluster.make_backup("logical")
    backup_partial = cluster.make_backup("logical --ns=test_db1.*,test_db2.*")
    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
    client.admin.command('createUser', 'admin_random_user2', pwd='test123', roles=[{'role':'readWrite','db':'admin'}, 'userAdminAnyDatabase', 'clusterAdmin'])
    client.test_db1.command('createUser', 'test_random_user2', pwd='test123', roles=[{'role':'readWrite','db':'test_db1'}, {'role':'clusterManager','db':'admin'}])
    for i in range(10):
        client["test_db1"]["test_coll11"].insert_one({"key": i+10, "data": i+10})
        client["test_db2"]["test_coll21"].insert_one({"key": i+10, "data": i+10})
    time.sleep(5)
    pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
    pitr = " --time=" + pitr
    Cluster.log("Time for PITR is: " + pitr)
    time.sleep(10)
    cluster.disable_pitr()
    time.sleep(10)
    client.drop_database("test_db1")
    client.drop_database("test_db2")
    client.admin.command("dropUser", "admin_random_user1")
    client.admin.command("dropUser", "admin_random_user2")
    client.test_db1.command("dropUser", "test_random_user1")
    client.test_db1.command("dropUser", "test_random_user2")

    # restoring users and roles from a selective backup is not supported
    restore_commands = {
        'part_bck': " --base-snapshot=" + backup_partial + pitr,
        'full_bck_part_rst_wo_user': " --base-snapshot=" + backup_full + pitr + " --ns=test_db1.*,test_db2.*",
        'full_bck_part_rst_user': " --base-snapshot=" + backup_full + pitr + " --ns=test_db1.*,test_db2.* --with-users-and-roles",
        'full_bck': " --base-snapshot=" + backup_full + pitr
    }

    # re-create the cluster with the new PBM user for the connection to check that the restore and the connection to the DB are OK
    # even though the same user with a different password is present in the backup
    if restore_type == 'full_bck':
        cluster.destroy()
        newcluster.create()
        newcluster.setup_pbm()
        newcluster.check_pbm_status()
        newcluster.make_restore(restore_commands.get(restore_type), check_pbm_status=True)

Check failure on line 133 in pbm-functional/pytest/test_user_roles.py (GitHub Actions / JUnit Test Report)
test_user_roles.test_logical_PBM_T216[full_bck]
AssertionError: Starting restore 2024-12-11T00:27:40.623794104Z to point-in-time 2024-12-11T00:26:07 from '2024-12-11T00:24:59Z'..Error: no chunk with the target time, the last chunk ends on {1733876762 43}
- Restore on replicaset "rscfg" in state: running
- Restore on replicaset "rs2" in state: error: no chunk with the target time, the last chunk ends on {1733876762 43}
- Restore on replicaset "rs1" in state: running
    else:
        cluster.make_restore(restore_commands.get(restore_type), check_pbm_status=True)

    assert client["test_db1"]["test_coll11"].count_documents({}) == 20
    assert client["test_db1"].command("collstats", "test_coll11").get("sharded", False)
    assert client["test_db2"]["test_coll21"].count_documents({}) == 20
    assert client["test_db2"].command("collstats", "test_coll21").get("sharded", True) is False

    assert check_user(client, "admin", "admin_random_user1", {'readWrite', 'userAdminAnyDatabase', 'clusterAdmin'}) == \
        (restore_type == 'full_bck'), \
        f"Failed for {restore_type}: admin_random_user1 role mismatch"
    assert check_user(client, "admin", "admin_random_user2", {'readWrite', 'userAdminAnyDatabase', 'clusterAdmin'}) == \
        (restore_type == 'full_bck'), \
        f"Failed for {restore_type}: admin_random_user2 role mismatch"
    assert check_user(client, "test_db1", "test_random_user1", {'readWrite', 'clusterManager'}) == (restore_type not in \
        ['part_bck','full_bck_part_rst_wo_user']), \
        f"Failed for {restore_type}: test_random_user1 role mismatch"
    assert check_user(client, "test_db1", "test_random_user2", {'readWrite', 'clusterManager'}) == (restore_type not in \
        ['part_bck','full_bck_part_rst_wo_user','full_bck_part_rst_user']), \
        f"Failed for {restore_type}: test_random_user2 role mismatch"
    Cluster.log("Finished successfully")
