Skip to content

Commit

Permalink
run db migrations in a custom resource (#1177)
Browse files Browse the repository at this point in the history
### Feature or Bugfix
- Bugfix

### Detail
Currently the DB is being initialised in the DBMigrations pipeline stage
(using CodeBuild) which runs after the BackendStage.
The SavePermissions TriggerFunction runs during the deployment of
Backend, just after deployment of the DBCluster.
As a result, on clean deployments the SavePermissions step fails
because the DB is uninitialized.

To resolve that in this PR we do the following
* remove DBMigrations stage based on CodeBuild
* run DBMigrations as part of a TriggerFunction/CustomResource after the
DB deployment
* run SavePermissions TriggerFunction after the DBMigrations
TriggerFunction

### Security
Please answer the questions below briefly where applicable, or write
`N/A`. Based on
[OWASP 10](https://owasp.org/Top10/en/).

- Does this PR introduce or modify any input fields or queries - this
includes
fetching data from storage outside the application (e.g. a database, an
S3 bucket)?
  - Is the input sanitized?
- What precautions are you taking before deserializing the data you
consume?
  - Is injection prevented by parametrizing queries?
  - Have you ensured no `eval` or similar functions are used?
- Does this PR introduce any functionality or component that requires
authorization?
- How have you ensured it respects the existing AuthN/AuthZ mechanisms?
  - Are you logging failed auth attempts?
- Are you using or adding any cryptographic features?
  - Do you use standard, proven implementations?
  - Are the used keys controlled by the customer? Where are they stored?
- Are you introducing any new policies/roles/users?
  - Have you used the least-privilege principle? How?


By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache 2.0 license.
  • Loading branch information
petrkalos authored Apr 16, 2024
1 parent b634b41 commit 356a8a7
Show file tree
Hide file tree
Showing 7 changed files with 40 additions and 293 deletions.
18 changes: 18 additions & 0 deletions backend/dbmigrations_handler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
"""
The handler of this module will be called once upon every deployment
"""

import logging
import os

from alembic import command
from alembic.config import Config

logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))


def handler(event, context) -> None:
    """Custom-resource entry point: upgrade the database schema to head.

    Invoked once per deployment; applies all pending Alembic migrations
    from the ./migrations directory against the configured database.
    """
    migration_config = Config('alembic.ini')
    migration_config.set_main_option('script_location', './migrations')
    # NOTE: alembic reconfigures logging here, so log output stops afterwards.
    command.upgrade(migration_config, 'head')
3 changes: 2 additions & 1 deletion backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@ PyYAML==6.0
requests==2.31.0
requests_aws4auth==1.1.1
sqlalchemy==1.3.24
starlette==0.36.3
starlette==0.36.3
alembic==1.13.1
3 changes: 1 addition & 2 deletions backend/trigger_handler.py → backend/saveperms_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,9 @@

logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))
log = logging.getLogger(__name__)


def save_permissions(event, context):
def handler(event, context) -> None:
load_modules(modes={ImportMode.API})
envname = os.getenv('envname', 'local')
engine = get_engine(envname=envname)
Expand Down
35 changes: 17 additions & 18 deletions deploy/stacks/backend_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
from .container import ContainerStack
from .cw_canaries import CloudWatchCanariesStack
from .cw_rum import CloudWatchRumStack
from .dbmigration import DBMigrationStack
from .lambda_api import LambdaApiStack
from .monitoring import MonitoringStack
from .opensearch import OpenSearchStack
Expand Down Expand Up @@ -220,21 +219,6 @@ def __init__(
**kwargs,
)

dbmigration_stack = DBMigrationStack(
self,
'DbMigration',
envname=envname,
resource_prefix=resource_prefix,
vpc=vpc,
s3_prefix_list=self.s3_prefix_list,
tooling_account_id=tooling_account_id,
pipeline_bucket=pipeline_bucket,
vpce_connection=vpce_connection,
codeartifact_domain_name=codeartifact_domain_name,
codeartifact_pip_repo_name=codeartifact_pip_repo_name,
**kwargs,
)

if quicksight_enabled:
pivot_role_in_account = iam.Role(
self,
Expand Down Expand Up @@ -321,22 +305,37 @@ def __init__(
self.lambda_api_stack.api_handler,
],
ecs_security_groups=self.ecs_stack.ecs_security_groups,
codebuild_dbmigration_sg=dbmigration_stack.codebuild_sg,
prod_sizing=prod_sizing,
quicksight_monitoring_sg=quicksight_monitoring_sg,
**kwargs,
)

db_migrations = TriggerFunctionStack(
self,
'DbMigrations',
handler='dbmigrations_handler.handler',
envname=envname,
resource_prefix=resource_prefix,
vpc=vpc,
vpce_connection=vpce_connection,
image_tag=image_tag,
ecr_repository=repo,
execute_after=[aurora_stack.cluster],
connectables=[aurora_stack.cluster],
**kwargs,
)

TriggerFunctionStack(
self,
'SavePerms',
handler='trigger_handler.save_permissions',
handler='saveperms_handler.handler',
envname=envname,
resource_prefix=resource_prefix,
vpc=vpc,
vpce_connection=vpce_connection,
image_tag=image_tag,
ecr_repository=repo,
execute_after=[db_migrations.trigger_function],
connectables=[aurora_stack.cluster],
**kwargs,
)
Expand Down
239 changes: 0 additions & 239 deletions deploy/stacks/dbmigration.py

This file was deleted.

32 changes: 0 additions & 32 deletions deploy/stacks/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,10 +182,6 @@ def __init__(
)
)

self.set_db_migration_stage(
target_env,
)

if target_env.get('enable_update_dataall_stacks_in_cicd_pipeline', False):
self.set_stacks_updater_stage(target_env)

Expand Down Expand Up @@ -653,34 +649,6 @@ def set_backend_stage(self, target_env, repository_name):
)
return backend_stage

def set_db_migration_stage(
    self,
    target_env,
):
    """Add a post-deployment pipeline wave that runs DB migrations.

    Creates a wave named ``{prefix}-{envname}-dbmigration-stage`` whose
    single post step is a CodeBuild job that assumes a role in the target
    account, kicks off the pre-provisioned ``*-dbmigration`` CodeBuild
    project there, and polls it until it succeeds or fails.

    :param target_env: dict describing the deployment target; keys read
        here are ``envname``, ``account`` and (optionally) ``region``.
    """
    migration_wave = self.pipeline.add_wave(f"{self.resource_prefix}-{target_env['envname']}-dbmigration-stage")
    migration_wave.add_post(
        pipelines.CodeBuildStep(
            id='MigrateDB',
            build_environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_5,
            ),
            commands=[
                # Write an AWS CLI profile that assumes the cross-account
                # dbmigration role in the target account, sourcing
                # credentials from the build container.
                'mkdir ~/.aws/ && touch ~/.aws/config',
                'echo "[profile buildprofile]" > ~/.aws/config',
                f'echo "role_arn = arn:aws:iam::{target_env["account"]}:role/{self.resource_prefix}-{target_env["envname"]}-cb-dbmigration-role" >> ~/.aws/config',
                'echo "credential_source = EcsContainer" >> ~/.aws/config',
                # Sanity-check the assumed identity before doing anything.
                'aws sts get-caller-identity --profile buildprofile',
                # Start the migration CodeBuild project in the target
                # account/region and capture the build id.
                f'aws codebuild start-build --project-name {self.resource_prefix}-{target_env["envname"]}-dbmigration --profile buildprofile --region {target_env.get("region", self.region)} > codebuild-id.json',
                f'aws codebuild batch-get-builds --ids $(jq -r .build.id codebuild-id.json) --profile buildprofile --region {target_env.get("region", self.region)} > codebuild-output.json',
                # Poll every 5s until the build status leaves the
                # in-progress state (SUCCEEDED or FAILED).
                f'while [ "$(jq -r .builds[0].buildStatus codebuild-output.json)" != "SUCCEEDED" ] && [ "$(jq -r .builds[0].buildStatus codebuild-output.json)" != "FAILED" ]; do echo "running migration"; aws codebuild batch-get-builds --ids $(jq -r .build.id codebuild-id.json) --profile buildprofile --region {target_env.get("region", self.region)} > codebuild-output.json; echo "$(jq -r .builds[0].buildStatus codebuild-output.json)"; sleep 5; done',
                # Fail this step (non-zero exit) if the remote build failed,
                # dumping its output for diagnosis.
                'if [ "$(jq -r .builds[0].buildStatus codebuild-output.json)" = "FAILED" ]; then echo "Failed"; cat codebuild-output.json; exit -1; fi',
                'cat codebuild-output.json ',
            ],
            # NOTE(review): presumably without_policy_updates() freezes the
            # role's policies so CDK does not grant extra permissions to
            # this step — confirm against the CDK pipelines docs.
            role=self.expanded_codebuild_role.without_policy_updates(),
            vpc=self.vpc,
        ),
    )

def set_stacks_updater_stage(
self,
target_env,
Expand Down
Loading

0 comments on commit 356a8a7

Please sign in to comment.