Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Create RDS database snapshot before executing alembic migrations #1267

Merged
merged 7 commits into from
May 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file.
67 changes: 67 additions & 0 deletions backend/deployment_triggers/dbsnapshots_handler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
"""
The handler of this module will be called once upon every deployment
"""

import logging
import os
import datetime
import boto3
import time
from alembic import command
from alembic.script import ScriptDirectory
from alembic.migration import MigrationContext
from alembic.config import Config
from dataall.base.db.connection import ENVNAME, get_engine

logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))


def handler(event, context) -> None:
    """
    Deployment trigger: snapshot the Aurora cluster before migrations run.

    Called once upon every deployment. Compares the alembic head revision
    (from the migration scripts bundled in the image) with the revision
    currently stamped in the database; when they differ — i.e. there are
    migration scripts pending — it creates an RDS cluster snapshot so the
    database can be restored if a later migration fails. The migrations
    themselves are executed by a separate deployment trigger, not here.

    :param event: Lambda event payload (unused).
    :param context: Lambda context object (unused).
    :raises Exception: if the snapshot creation fails for any reason.
    """
    alembic_cfg = Config('alembic.ini')
    alembic_cfg.set_main_option('script_location', './migrations')

    # Head revision according to the migration scripts shipped with this image.
    script = ScriptDirectory.from_config(alembic_cfg)
    head_rev = script.get_current_head()

    # Revision currently stamped in the database. Bound to `migration_context`
    # (not `context`) so the Lambda `context` parameter is not shadowed.
    engine = get_engine(ENVNAME)
    with engine.engine.connect() as connection:
        migration_context = MigrationContext.configure(connection)
        current_rev = migration_context.get_current_revision()

    if head_rev != current_rev:
        snapshot_id = f'{os.environ.get("resource_prefix", "dataall")}-migration-{head_rev}-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
        # The cluster identifier is the first label of the cluster endpoint host name.
        cluster_id = engine.dbconfig.host.split('.')[0]
        logger.info(
            f'Creating RDS snapshot for cluster {cluster_id}, head revision {head_rev} is ahead of {current_rev}...'
        )
        try:
            rds_client = boto3.client('rds', region_name=os.getenv('AWS_REGION'))
            # Edge case in which the cluster is performing backup and/or maintenance operations.
            # Snapshots can only be taken while the cluster is 'available', so poll until then.
            # If it times out the CICD pipeline fails and needs to be retried.
            while (
                cluster_status := rds_client.describe_db_clusters(DBClusterIdentifier=cluster_id)['DBClusters'][0][
                    'Status'
                ]
            ) != 'available':
                logger.info(f'Waiting while the cluster is available, {cluster_status=}')
                time.sleep(30)

            rds_client.create_db_cluster_snapshot(
                DBClusterSnapshotIdentifier=snapshot_id,
                DBClusterIdentifier=cluster_id,
                Tags=[
                    {'Key': 'Application', 'Value': 'dataall'},
                ],
            )
        except Exception as e:
            logger.exception(f'Failed to create RDS snapshot: {e}')
            # Chain the original exception so the root cause is preserved in the trace.
            raise Exception(f'Failed to create RDS snapshot: {e}') from e
36 changes: 33 additions & 3 deletions deploy/stacks/backend_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,25 +310,55 @@ def __init__(
**kwargs,
)

db_snapshots = TriggerFunctionStack(
self,
'DbSnapshots',
handler='deployment_triggers.dbsnapshots_handler.handler',
envname=envname,
resource_prefix=resource_prefix,
vpc=vpc,
vpce_connection=vpce_connection,
image_tag=image_tag,
ecr_repository=repo,
execute_after=[aurora_stack.cluster],
connectables=[aurora_stack.cluster],
additional_policy_statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=['rds:AddTagsToResource', 'rds:CreateDBClusterSnapshot'],
resources=[
f'arn:aws:rds:*:{self.account}:cluster-snapshot:{resource_prefix}*',
f'arn:aws:rds:*:{self.account}:cluster:{resource_prefix}*',
],
),
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=['rds:DescribeDBClusters'],
resources=['*'],
),
],
**kwargs,
)

db_migrations = TriggerFunctionStack(
self,
'DbMigrations',
handler='dbmigrations_handler.handler',
handler='deployment_triggers.dbmigrations_handler.handler',
envname=envname,
resource_prefix=resource_prefix,
vpc=vpc,
vpce_connection=vpce_connection,
image_tag=image_tag,
ecr_repository=repo,
execute_after=[aurora_stack.cluster],
execute_after=[db_snapshots.trigger_function],
connectables=[aurora_stack.cluster],
**kwargs,
)

TriggerFunctionStack(
self,
'SavePerms',
handler='saveperms_handler.handler',
handler='deployment_triggers.saveperms_handler.handler',
envname=envname,
resource_prefix=resource_prefix,
vpc=vpc,
Expand Down
7 changes: 4 additions & 3 deletions deploy/stacks/trigger_function_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ def __init__(
vpce_connection: ec2.IConnectable = None,
connectables: List[ec2.IConnectable] = [],
execute_after: List[Construct] = [],
additional_policy_statements: List[iam.PolicyStatement] = [],
**kwargs,
):
super().__init__(scope, id, **kwargs)
Expand All @@ -35,16 +36,16 @@ def __init__(
image_tag = self.node.try_get_context('image_tag')
image_tag = f'lambdas-{image_tag}'

env = {'envname': envname, 'LOG_LEVEL': 'INFO'}
env = {'envname': envname, 'resource_prefix': resource_prefix, 'LOG_LEVEL': 'INFO'}

function_sgs = self.create_lambda_sgs(envname, handler, resource_prefix, vpc)

statements = self.get_policy_statements(resource_prefix) + (additional_policy_statements or [])
self.trigger_function = TriggerFunction(
self,
f'TriggerFunction-{handler}',
function_name=f'{resource_prefix}-{envname}-{handler.replace(".", "_")}',
description=f'dataall {handler} trigger function',
initial_policy=self.get_policy_statements(resource_prefix),
initial_policy=statements,
code=_lambda.Code.from_ecr_image(repository=ecr_repository, tag=image_tag, cmd=[handler]),
vpc=vpc,
security_groups=[function_sgs],
Expand Down
Loading