Skip to content

Commit

Permalink
Merge pull request #144 from base2Services/develop
Browse files Browse the repository at this point in the history
Release 0.9.8
  • Loading branch information
tarunmenon95 authored Apr 19, 2023
2 parents a84f52b + f6200ec commit 140b498
Show file tree
Hide file tree
Showing 10 changed files with 181 additions and 24 deletions.
2 changes: 1 addition & 1 deletion deploy-sam-template.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/bin/bash
set -e

SHELVERY_VERSION=0.9.7
SHELVERY_VERSION=0.9.8

# set DOCKERUSERID to current user. could be changed with -u uid
DOCKERUSERID="-u $(id -u)"
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from setuptools import setup

setup(name='shelvery', version='0.9.7', author='Base2Services R&D',
setup(name='shelvery', version='0.9.8', author='Base2Services R&D',
author_email='[email protected]',
url='http://github.com/base2Services/shelvery-aws-backups',
classifiers=[
Expand Down
2 changes: 1 addition & 1 deletion shelvery/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
__version__ = '0.9.7'
__version__ = '0.9.8'
LAMBDA_WAIT_ITERATION = 'lambda_wait_iteration'
S3_DATA_PREFIX = 'backups'
SHELVERY_DO_BACKUP_TAGS = ['True', 'true', '1', 'TRUE']
5 changes: 3 additions & 2 deletions shelvery/backup_resource.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ def construct(cls,

def entity_resource_tags(self):
    """Return the tags of the backed-up entity, or an empty dict when no entity is attached."""
    if self.entity_resource is None:
        return {}
    return self.entity_resource.tags

def calculate_expire_date(self, engine, custom_retention_types=None):
"""Determine expire date, based on 'retention_type' tag"""
if self.retention_type == BackupResource.RETENTION_DAILY:
Expand Down Expand Up @@ -197,11 +197,12 @@ def region(self, region: str):
self.__region = region

def set_retention_type(self, retention_type: str):
    """Switch this backup to *retention_type*, renaming it and refreshing its tags.

    The trailing ``-<old_type>`` segment of the backup name is replaced with
    the new retention type, and the Name / shelvery name / retention_type
    tags are updated to match.
    """
    self.retention_type = retention_type
    base = '-'.join(self.name.split('-')[:-1])
    self.name = f"{base}-{retention_type}"
    # tag prefix is stored on the resource itself under 'shelvery:tag_name'
    prefix = self.tags['shelvery:tag_name']
    self.tags[f"{prefix}:name"] = self.name
    self.tags['Name'] = self.name
    self.tags[f"{prefix}:retention_type"] = retention_type

@property
def boto3_tags(self):
tags = self.tags
Expand Down
51 changes: 49 additions & 2 deletions shelvery/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import logging
import time
import sys
from unittest import skip

import botocore
import yaml
Expand Down Expand Up @@ -34,6 +35,13 @@ class ShelveryEngine:
DEFAULT_KEEP_WEEKLY = 8
DEFAULT_KEEP_MONTHLY = 12
DEFAULT_KEEP_YEARLY = 10

RETENTION_TYPE_PRECEDENCE = {
BackupResource.RETENTION_YEARLY : BackupResource.RETENTION_MONTHLY,
BackupResource.RETENTION_MONTHLY : BackupResource.RETENTION_WEEKLY,
BackupResource.RETENTION_WEEKLY : BackupResource.RETENTION_DAILY,
BackupResource.RETENTION_DAILY : None
}

BACKUP_RESOURCE_TAG = 'create_backup'

Expand Down Expand Up @@ -173,6 +181,18 @@ def _write_backup_data(self, backup, bucket, shared_account_id=None):
self.logger.info(f"Wrote meta for backup {backup.name} of type {self.get_engine_type()} to" +
f" s3://{bucket.name}/{s3key}")

def _verify_retention(self, backup_resource: BackupResource) -> bool:
    """Check whether backups of this resource's retention type are enabled.

    Returns False only when the configured keep-count for the backup's
    retention type is 0; any unrecognised retention type fails open (True)
    so that custom retention types are never silently skipped.
    """
    keep_getters = {
        backup_resource.RETENTION_DAILY: RuntimeConfig.get_keep_daily,
        backup_resource.RETENTION_WEEKLY: RuntimeConfig.get_keep_weekly,
        backup_resource.RETENTION_MONTHLY: RuntimeConfig.get_keep_monthly,
        backup_resource.RETENTION_YEARLY: RuntimeConfig.get_keep_yearly,
    }
    getter = keep_getters.get(backup_resource.retention_type)
    if getter is None:
        # fail open
        return True
    return getter(backup_resource.entity_resource_tags(), self) != 0

### Top level methods, invoked externally ####
def create_backups(self) -> List[BackupResource]:
Expand All @@ -183,7 +203,7 @@ def create_backups(self) -> List[BackupResource]:
self.logger.info(f"Collecting entities of type {resource_type} tagged with "
f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
resources = self.get_entities_to_backup(f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")

# allows user to select single entity to be backed up
if RuntimeConfig.get_shelvery_select_entity(self) is not None:
entity_id = RuntimeConfig.get_shelvery_select_entity(self)
Expand All @@ -206,9 +226,33 @@ def create_backups(self) -> List[BackupResource]:
copy_resource_tags=RuntimeConfig.copy_resource_tags(self),
exluded_resource_tag_keys=RuntimeConfig.get_exluded_resource_tag_keys(self)
)

# if retention is explicitly given by runtime environment
if current_retention_type is not None:
backup_resource.set_retention_type(current_retention_type)

# Check whether current retention is allowed, if not try next retention type by precedence
skip_backup = False

# skip validation if custom retention type
if backup_resource.retention_type in self.RETENTION_TYPE_PRECEDENCE:
# Check whether current retention is allowed, if not try next retention type by precedence
while not self._verify_retention(backup_resource):
self.logger.info(f"Retention Type: {backup_resource.retention_type} disabled")
new_retention_type = self.RETENTION_TYPE_PRECEDENCE[backup_resource.retention_type]
self.logger.info(f"Checking whether retention type: {new_retention_type} is permitted")
if new_retention_type:
backup_resource.set_retention_type(new_retention_type)
else:
# No lower-precedence retention type remains (daily keep-count is 0), so skip this backup
skip_backup = True
break
else:
self.logger.info(f"Skipping retention check as custom retention type {backup_resource.retention_type} was detected")

# Skip current backup
if skip_backup:
continue

dr_regions = RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self)
backup_resource.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_regions"] = ','.join(dr_regions)
Expand Down Expand Up @@ -631,7 +675,10 @@ def do_share_backup(self, map_args={}, **kwargs):

self.logger.info(f"Do share backup {backup_id} ({backup_region}) with {destination_account_id}")
try:
self.share_backup_with_account(backup_region, backup_id, destination_account_id)
new_backup_id = self.share_backup_with_account(backup_region, backup_id, destination_account_id)
#assign new backup id if new snapshot is created (eg: re-encrypted rds snapshot)
backup_id = new_backup_id if new_backup_id else backup_id
self.logger.info(f"Shared backup {backup_id} ({backup_region}) with {destination_account_id}")
backup_resource = self.get_backup_resource(backup_region, backup_id)
self._write_backup_data(
backup_resource,
Expand Down
64 changes: 58 additions & 6 deletions shelvery/rds_backup.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,25 +95,77 @@ def get_existing_backups(self, backup_tag_prefix: str) -> List[BackupResource]:

def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
"""Share an RDS snapshot with another AWS account.

If a re-encrypt KMS key is configured (shelvery_reencrypt_kms_key_id in the
resource tags / runtime config), a re-encrypted copy of the snapshot is
created first and that copy is shared instead; the original snapshot is then
deleted. Returns the id of the snapshot that was actually shared, which
differs from *backup_id* when a re-encrypted copy was made.
"""
rds_client = AwsHelper.boto3_client('rds', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
backup_resource = self.get_backup_resource(backup_region, backup_id)
kms_key = RuntimeConfig.get_reencrypt_kms_key_id(backup_resource.tags, self)

# if a re-encrypt key is provided, create new re-encrypted snapshot and share that instead
if kms_key:
self.logger.info(f"Re-encrypt KMS Key found, creating new backup with {kms_key}")
# copy_backup_to_region applies the KMS key and returns the new
# (suffixed) snapshot id when a re-encrypt key is set
backup_id = self.copy_backup_to_region(backup_id, backup_region)
self.logger.info(f"Creating new encrypted backup {backup_id}")
# wait till new snapshot is available
# NOTE(review): on timeout this returns None; caller is expected to
# fall back to the original backup_id — confirm do_share_backup handles it
if not self.wait_backup_available(backup_region=backup_region,
backup_id=backup_id,
lambda_method='do_share_backup',
lambda_args={}):
return
self.logger.info(f"New encrypted backup {backup_id} created")

# look up the ARN of the newly created snapshot (needed for tagging)
snapshots = rds_client.describe_db_snapshots(DBSnapshotIdentifier=backup_id)
snapshot_arn = snapshots['DBSnapshots'][0]['DBSnapshotArn']

# refresh Name / shelvery:name tags so they match the new snapshot id
self.logger.info(f"Updating tags for new snapshot - {backup_id}")
tags = self.get_backup_resource(backup_region, backup_id).tags
tags.update({'Name': backup_id, 'shelvery:name': backup_id})
tag_list = [{'Key': key, 'Value': value} for key, value in tags.items()]
rds_client.add_tags_to_resource(
ResourceName=snapshot_arn,
Tags=tag_list
)
created_new_encrypted_snapshot = True
else:
self.logger.info(f"No re-encrypt key detected")
created_new_encrypted_snapshot = False

# grant the destination account restore access to the snapshot
rds_client.modify_db_snapshot_attribute(
DBSnapshotIdentifier=backup_id,
AttributeName='restore',
ValuesToAdd=[aws_account_id]
)
# if re-encryption occurred, clean up the now-redundant original snapshot
if created_new_encrypted_snapshot:
self.delete_backup(backup_resource)
self.logger.info(f"Cleaning up un-encrypted backup: {backup_resource.backup_id}")

return backup_id

def copy_backup_to_region(self, backup_id: str, region: str) -> str:
local_region = boto3.session.Session().region_name
client_local = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id)
rds_client = AwsHelper.boto3_client('rds', region_name=region, arn=self.role_arn, external_id=self.role_external_id)
snapshots = client_local.describe_db_snapshots(DBSnapshotIdentifier=backup_id)
snapshot = snapshots['DBSnapshots'][0]
rds_client.copy_db_snapshot(
SourceDBSnapshotIdentifier=snapshot['DBSnapshotArn'],
TargetDBSnapshotIdentifier=backup_id,
SourceRegion=local_region,
backup_resource = self.get_backup_resource(local_region, backup_id)
kms_key = RuntimeConfig.get_reencrypt_kms_key_id(backup_resource.tags, self)
rds_client_params = {
'SourceDBSnapshotIdentifier': snapshot['DBSnapshotArn'],
'TargetDBSnapshotIdentifier': backup_id,
'SourceRegion': local_region,
# tags are created explicitly
CopyTags=False
)
'CopyTags': False
}
# add kms key params if reencrypt key is defined
if kms_key is not None:
backup_id = f'{backup_id}-re-encrypted'
rds_client_params['KmsKeyId'] = kms_key
rds_client_params['CopyTags'] = True
rds_client_params['TargetDBSnapshotIdentifier'] = backup_id

rds_client.copy_db_snapshot(**rds_client_params)
return backup_id

def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource:
Expand Down
65 changes: 58 additions & 7 deletions shelvery/rds_cluster_backup.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,25 +96,76 @@ def get_existing_backups(self, backup_tag_prefix: str) -> List[BackupResource]:

def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
"""Share an RDS cluster snapshot with another AWS account.

If a re-encrypt KMS key is configured (shelvery_reencrypt_kms_key_id in the
resource tags / runtime config), a re-encrypted copy of the cluster snapshot
is created first and that copy is shared instead; the original snapshot is
then deleted. Returns the id of the snapshot that was actually shared, which
differs from *backup_id* when a re-encrypted copy was made.
"""
rds_client = AwsHelper.boto3_client('rds', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
backup_resource = self.get_backup_resource(backup_region, backup_id)
kms_key = RuntimeConfig.get_reencrypt_kms_key_id(backup_resource.tags, self)

# if a re-encrypt key is provided, create new re-encrypted snapshot and share that instead
if kms_key:
self.logger.info(f"Re-encrypt KMS Key found, creating new backup with {kms_key}")
# copy_backup_to_region applies the KMS key and returns the new
# (suffixed) snapshot id when a re-encrypt key is set
backup_id = self.copy_backup_to_region(backup_id, backup_region)
self.logger.info(f"Creating new encrypted backup {backup_id}")
# wait till new snapshot is available
# NOTE(review): on timeout this returns None; caller is expected to
# fall back to the original backup_id — confirm do_share_backup handles it
if not self.wait_backup_available(backup_region=backup_region,
backup_id=backup_id,
lambda_method='do_share_backup',
lambda_args={}):
return
self.logger.info(f"New encrypted backup {backup_id} created")

# look up the ARN of the newly created cluster snapshot (needed for tagging)
snapshots = rds_client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
snapshot_arn = snapshots['DBClusterSnapshots'][0]['DBClusterSnapshotArn']

# refresh Name / shelvery:name tags so they match the new snapshot id
self.logger.info(f"Updating tags for new snapshot - {backup_id}")
tags = self.get_backup_resource(backup_region, backup_id).tags
tags.update({'Name': backup_id, 'shelvery:name': backup_id})
tag_list = [{'Key': key, 'Value': value} for key, value in tags.items()]
rds_client.add_tags_to_resource(
ResourceName=snapshot_arn,
Tags=tag_list
)
created_new_encrypted_snapshot = True
else:
self.logger.info(f"No re-encrypt key detected")
created_new_encrypted_snapshot = False

# grant the destination account restore access to the cluster snapshot
rds_client.modify_db_cluster_snapshot_attribute(
DBClusterSnapshotIdentifier=backup_id,
AttributeName='restore',
ValuesToAdd=[aws_account_id]
)
# if re-encryption occurred, clean up the now-redundant original snapshot
if created_new_encrypted_snapshot:
self.delete_backup(backup_resource)
self.logger.info(f"Cleaning up un-encrypted backup: {backup_resource.backup_id}")

return backup_id

def copy_backup_to_region(self, backup_id: str, region: str) -> str:
local_region = boto3.session.Session().region_name
client_local = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id)
rds_client = AwsHelper.boto3_client('rds', region_name=region)
snapshots = client_local.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
snapshot = snapshots['DBClusterSnapshots'][0]
rds_client.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier=snapshot['DBClusterSnapshotArn'],
TargetDBClusterSnapshotIdentifier=backup_id,
SourceRegion=local_region,
# tags are created explicitly
CopyTags=False
)
backup_resource = self.get_backup_resource(local_region, backup_id)
kms_key = RuntimeConfig.get_reencrypt_kms_key_id(backup_resource.tags, self)
rds_client_params = {
'SourceDBClusterSnapshotIdentifier': snapshot['DBClusterSnapshotArn'],
'TargetDBClusterSnapshotIdentifier': backup_id,
'SourceRegion': local_region,
'CopyTags': False
}
# add kms key params if re-encrypt key is defined
if kms_key is not None:
backup_id = f'{backup_id}-re-encrypted'
rds_client_params['KmsKeyId'] = kms_key
rds_client_params['CopyTags'] = True
rds_client_params['TargetDBClusterSnapshotIdentifier'] = backup_id

rds_client.copy_db_cluster_snapshot(**rds_client_params)
return backup_id

def copy_shared_backup(self, source_account: str, source_backup: BackupResource):
Expand Down
10 changes: 8 additions & 2 deletions shelvery/runtime_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ class RuntimeConfig:
when enabled 'shelvery_copy_kms_key_id' must also be set.
shelvery_copy_kms_key_id - when copying a shared snapshot, you can specify a different kms key used to encrypt the original snapshot.
Note that when copying to a new key, the shelvery requires access to both the new key and the original key.
shelvery_reencrypt_kms_key_id - KMS key used to re-encrypt a snapshot before sharing it with another account; the re-encrypted copy is shared instead of the original.
"""

DEFAULT_KEEP_DAILY = 14
Expand Down Expand Up @@ -102,7 +103,8 @@ class RuntimeConfig:
'shelvery_sqs_queue_wait_period': 0,
'shelvery_ignore_invalid_resource_state': False,
'shelvery_encrypt_copy': False,
'shelvery_copy_kms_key_id': None
'shelvery_copy_kms_key_id': None,
'shelvery_reencrypt_kms_key_id': None
}

@classmethod
Expand Down Expand Up @@ -334,4 +336,8 @@ def get_encrypt_copy(cls, resource_tags, engine):

@classmethod
def get_copy_kms_key_id(cls, resource_tags, engine):
return cls.get_conf_value('shelvery_copy_kms_key_id', resource_tags, engine.lambda_payload)
return cls.get_conf_value('shelvery_copy_kms_key_id', resource_tags, engine.lambda_payload)

@classmethod
def get_reencrypt_kms_key_id(cls, resource_tags, engine):
    """Resolve the KMS key id used to re-encrypt a snapshot before sharing it."""
    config_key = 'shelvery_reencrypt_kms_key_id'
    return cls.get_conf_value(config_key, resource_tags, engine.lambda_payload)
2 changes: 1 addition & 1 deletion shelvery_tests/rds_cluster_integration_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def setUp(self):

# Complete initial setup and create service client
initSetup(self,'rds')
rdsclient = AwsHelper.boto3_client('docdb', region_name='ap-southeast-2')
rdsclient = AwsHelper.boto3_client('rds', region_name='ap-southeast-2')


#Get cluster name
Expand Down
2 changes: 1 addition & 1 deletion template.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ Resources:
Tags:
Name: Shelvery
CreatedBy: Shelvery
ShelveryVersion: 0.9.7
ShelveryVersion: 0.9.8

Environment:
Variables:
Expand Down

0 comments on commit 140b498

Please sign in to comment.