From dec8b5c710df070dc292900c524abe5e06547910 Mon Sep 17 00:00:00 2001
From: Derrick Burns
Date: Tue, 8 Oct 2019 09:23:47 -0700
Subject: [PATCH] Moved tpctl to eks-template repo

---
 bin/get_k8s_secret_from_aws |   25 +-
 cmd/Dockerfile              |   44 --
 cmd/README.md               |  644 -----------------
 cmd/agent.sh                |   11 -
 cmd/build.sh                |    4 -
 cmd/external_secret         |   77 --
 cmd/push.sh                 |    2 -
 cmd/separate_files          |   41 --
 cmd/tpctl                   |   46 --
 cmd/tpctl.sh                | 1358 -----------------------------------
 10 files changed, 6 insertions(+), 2246 deletions(-)
 delete mode 100644 cmd/Dockerfile
 delete mode 100644 cmd/README.md
 delete mode 100755 cmd/agent.sh
 delete mode 100755 cmd/build.sh
 delete mode 100755 cmd/external_secret
 delete mode 100755 cmd/push.sh
 delete mode 100755 cmd/separate_files
 delete mode 100755 cmd/tpctl
 delete mode 100755 cmd/tpctl.sh

diff --git a/bin/get_k8s_secret_from_aws b/bin/get_k8s_secret_from_aws
index 46cdc95a2..8d44246e9 100755
--- a/bin/get_k8s_secret_from_aws
+++ b/bin/get_k8s_secret_from_aws
@@ -2,35 +2,22 @@
 #
 # Get Secret from AWS Secrets Manager
 #
-# Usage: $0 $SECRET $ENVIRONMENT
+# Usage: $0 $SECRETID
 
-if [ $# -ne 2 ]
+if [ $# -ne 1 ]
 then
-    echo "Usage: $0 \$SECRET \$ENVIRONMENT"
+    echo "Usage: $0 \$SECRETID, e.g. $0 development/qa1/mongo"
+    exit 1
 fi
 
-SECRET=$1
-ENVIRONMENT=$2
-
-if [ -z "$CLUSTER_NAME" ]
-then
-    echo "\$CLUSTER_NAME required."
-    exit
-fi
-
-out=$(aws secretsmanager get-secret-value --secret-id ${CLUSTER_NAME}/${ENVIRONMENT}/${SECRET} | jq '.SecretString')
-
-val=$(echo "echo $out" | bash)
-yaml=$(python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)' <<< $val)
-
-indented=$(echo "$yaml" | sed -e 's/^/  /')
+yaml=$(aws secretsmanager get-secret-value --secret-id $1 | jq '.SecretString | fromjson' | yq r - | sed -e 's/^/  /')
 
 cat <<EOF

diff --git a/cmd/README.md b/cmd/README.md
deleted file mode 100644
--- a/cmd/README.md
+++ /dev/null
-```bash
-cat > tpctl <<!
-#!/bin/bash
-
-HELM_HOME=\${HELM_HOME:-~/.helm}
-KUBE_CONFIG=\${KUBECONFIG:-~/.kube/config}
-AWS_CONFIG=\${AWS_CONFIG:-~/.aws}
-GIT_CONFIG=\${GIT_CONFIG:-~/.gitconfig}
-SSH_HOME=\${SSH_HOME:-~/.ssh}
-
-mkdir -p \$HELM_HOME
-if [ ! -f "\$KUBE_CONFIG" ]
-then
-  touch \$KUBE_CONFIG
-fi
-
-docker run --rm -it \
--e REMOTE_REPO=\${REMOTE_REPO} \
--e GITHUB_TOKEN=\${GITHUB_TOKEN} \
--v \${SSH_HOME}:/root/.ssh \
--v \${HELM_HOME}:/root/.helm \
--v \${AWS_CONFIG}:/root/.aws \
--v \${KUBE_CONFIG}:/root/.kube/config \
--v \${GIT_CONFIG}:/root/.gitconfig \
-tidepool/tpctl /root/tpctl \$*
-!
-chmod +x tpctl
-```
-
-Alternatively, you may build your own local Docker image from the source by cloning the Tidepool `development` repo and running the `build.sh` script:
-```bash
-git clone git@github.com:tidepool-org/development
-cd development/cmd
-./build.sh
-```
-
-Thereafter, you may use the `tpctl` script provided.
-
-## Authentication
-
-`tpctl` interacts with several external services on your behalf, so it must authenticate itself to each of them.
-
-To do so, `tpctl` must access your credentials stored on your local machine. This explains the need for the numerous directories that are mounted into the Docker container.
-
-We explain these in detail below.
-If the assumptions we make are incorrect for your environment, you may set the environment variables used in the file to match your environment:
-
-```bash
-HELM_HOME=${HELM_HOME:-~/.helm}
-KUBE_CONFIG=${KUBECONFIG:-~/.kube/config}
-AWS_CONFIG=${AWS_CONFIG:-~/.aws}
-GIT_CONFIG=${GIT_CONFIG:-~/.gitconfig}
-```
-
-### GitHub
-In order to update your Git configuration repo with the tags of new versions of Docker images that you use, you must provide a [GitHub personal access token](https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line) with repo scope to access private repositories.
-
-```bash
-export GITHUB_TOKEN=....
-```
-
-### AWS
-In order to create and query AWS resources, you must provide access to your AWS credentials. We assume that you store those
-credentials in the standard place:
-```
-~/.aws/credentials
-```
-
-`tpctl` mounts `~/.aws` inside the Docker container to access the credentials.
-
-### Kubernetes
-In order to access your Kubernetes cluster, you must provide access to the file that stores your Kubernetes configurations. We assume that you store that file in:
-```
-~/.kube/config
-```
-
-`tpctl` mounts `~/.kube` inside the Docker container to access that file.
-
-### Helm
-In order to give you access to the Kubernetes cluster via the `helm` client, you must provide access to the directory that stores your `helm` client credentials. That directory is typically:
-```
-~/.helm
-```
-`tpctl` populates that directory with the TLS certificate and keys that are needed to communicate with the `tiller` server.
-
-### Git
-In order to make Git commits, `tpctl` needs your Git username and email. These are typically stored in:
-```
-~/.gitconfig
-```
-`tpctl` mounts that file.
-
-Check your `~/.gitconfig`. It must have entries for `email` and `name` such as:
-```ini
-[user]
-    email = derrick@tidepool.org
-    name = Derrick Burns
-```
-If it does not, then add them by running this locally:
-
-```bash
-git config --global user.email "you@example.com"
-git config --global user.name "Your Name"
-```
-
-### SSH
-In order to clone the `flux` tool repo, `tpctl` needs access to your GitHub SSH key. This is typically stored in:
-
-```
-~/.ssh/id_rsa
-```
-
-## Execution Environment
-
-Most of the operations of `tpctl` either use or manipulate a GitHub repository. You may use `tpctl` to configure an existing GitHub repository. To do so, provide the name of the repository as the *full name* (including `git@`):
-
-```bash
-export REMOTE_REPO=git@github.com:tidepool-org/cluster-test1
-```
-
-Alternatively, if you have not already created a GitHub repository, you may create one using `tpctl`:
-```bash
-tpctl repo
-```
-
-## Basic Usage
-
-To create an EKS cluster running the Tidepool services with GitOps
-and a service mesh that provides HIPAA compliance, you perform
-a series of steps:
-
-### Create a GitHub Configuration Repository
-
-This creates an empty *private* GitHub repository for storing the desired state of your EKS
-cluster. We call this the *config repo*.
-```bash
-tpctl repo
-```
-
-### Create a Configuration File
-
-This creates a file in your GitHub config repo called `values.yaml` that contains
-all the data needed to construct the other Kubernetes configuration files. Under normal circumstances, this is the *only* file that you will manually edit.
-
-```bash
-tpctl values
-```
-
-In this file, you find parameters that you may change to customize the installation.
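-
-For example, you can spot-check a few of the generated defaults with `yq`, the same tool that `tpctl` itself uses to read `values.yaml`. A minimal sketch (the field paths match the annotated example later in this README; the values in the comments are illustrative):
-
-```bash
-yq r values.yaml cluster.metadata.name    # name of the cluster, e.g. test1
-yq r values.yaml cluster.metadata.region  # AWS region, e.g. us-west-2
-yq r values.yaml environments             # Tidepool environments, e.g. qa2
-```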
-
-By default, the cluster name is derived from the GitHub repository name. You may override it.
-
-In addition, the default `values.yaml` file defines a single Tidepool environment named `qa2`. You must modify this environment or add others.
-
-Importantly, be sure to set the DNS names for your Tidepool services. Assuming that you have the authority to do so, TLS certificates are automatically generated for the names that you provide, and DNS aliases to those names are also created.
-
-### Generate the Configuration
-
-From the `values.yaml` file, `tpctl` can generate all the Kubernetes manifest files, the AWS IAM roles and policies, and the `eksctl` `ClusterConfig` file that is used to build a cluster. Do this after you have created and edited your `values.yaml` file. If you edit your `values.yaml` file, rerun this step:
-
-```bash
-tpctl config
-```
-
-### Create an AWS EKS Cluster
-
-Once you have generated the manifest files, you may create your EKS cluster.
-
-```bash
-tpctl cluster
-```
-
-This step takes *15-20 minutes*, during which time AWS provisions a new EKS cluster. It will result in a number of AWS CloudFormation stacks being generated. These stacks will have the prefix `eksctl-${ClusterName}-`.
-
-### Install a Service Mesh
-
-A service mesh encrypts inter-service traffic to ensure that personal health information (PHI) is protected in transit from exposure to unauthorized parties.
-
-You may install a service mesh as follows.
-
-```bash
-tpctl mesh
-```
-
-This must be done *before* the next step because the mesh intercepts future requests to install resources into your cluster. In some cases, it will add a sidecar to your pods. This is called `automatic sidecar injection`. So, if your mesh is not running, those pods will not have a sidecar to encrypt their traffic.
-
-If that happens, install the mesh, then manually delete the pods that were added while the mesh was non-operational.
-
-### Install the Flux GitOps Controller
-
-The Flux GitOps controller keeps your Kubernetes cluster up to date with the contents of the GitHub configuration repo. It also keeps your GitHub configuration repo up to date with the latest versions of Docker images of your services that are published in Docker Hub.
-
-To install the GitOps operator:
-
-```bash
-tpctl flux
-```
-
-In addition, this command installs the `tiller` server (the counterpart to the `helm` client) and creates and installs the TLS certificates that the `helm` client needs to communicate with the `tiller` server.
-
-## Common Issues
-
-Sometimes, one of the steps will fail. Most of the time, you can simply retry that step. However, in the case of `tpctl cluster` and `tpctl mesh`, certain side effects
-persist that may impede your progress.
-
-### Delete a Cluster
-
-To reverse the side effects of `tpctl cluster`, you may delete your cluster and await the completion of the deletion:
-
-```bash
-tpctl delete_cluster await_deletion
-```
-Deleting a cluster will take roughly 10 minutes.
-
-### Delete a Service Mesh
-
-To reverse the side effects of `tpctl mesh`, you may delete your mesh with:
-
-```bash
-tpctl remove_mesh
-```
-
-## Advanced Usage
-In addition to the basic commands above, you may:
-
-### Edit A Configuration File
-
-We do not recommend that you make manual changes to the files in your config repo, *except* the `values.yaml` file.
-
-However, you may access the GitHub configuration repo using standard Git commands.
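-
-For example, using ordinary Git commands (the repo URL is the illustrative one used earlier in this README):
-
-```bash
-git clone git@github.com:tidepool-org/cluster-test1
-cd cluster-test1
-# edit files, then:
-git add .
-git commit -m "describe your change"
-git push
-```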
-
-In addition, `tpctl` makes it convenient to clone the repo into a directory for you to make changes.
-
-With this command, `tpctl` opens a shell with a clone of the config repo in the current directory. You may make changes to that clone as you see fit. When you exit the shell, `tpctl` will commit those changes (with your permission) and push them to GitHub.
-
-```bash
-tpctl edit_repo
-```
-
-### Regenerate Helm Client Certs
-
-If you are managing multiple Kubernetes clusters with a TLS-enabled `tiller`, you must switch between TLS certificates. You may use this command to change to or regenerate the TLS certificates in your `~/.helm` directory:
-
-```bash
-tpctl regenerate_cert
-```
-
-### Edit Your values.yaml File
-
-If you need to modify the configuration parameters in the `values.yaml` file, you may do so with standard Git commands to operate on your Git repo. `tpctl` makes it even easier by checking out the Git repo on your behalf and opening the `vi` editor:
-
-```bash
-tpctl edit_values
-```
-
-### Copy S3 Assets To A New Bucket
-
-If you are launching a new cluster, you must provide S3 assets for email verification. You may copy the standard assets by using this command:
-
-```bash
-tpctl copy_assets
-```
-
-### Migrate Legacy Secrets
-
-If you are migrating from one of the Tidepool legacy environments, you may migrate the secrets that are used in one of those environments to AWS Secrets Manager and modify your configuration repo to access those secrets:
-
-```bash
-tpctl migrate_secrets
-```
-
-### Generate and Persist Random Secrets
-
-If you are creating a new environment, you can generate a new set of secrets, persist those secrets in AWS Secrets Manager, and modify your configuration repo to access those secrets:
-
-```bash
-tpctl randomize_secrets
-```
-
-### Load Plaintext Secrets
-
-If you have secrets to persist and use in your cluster, such as those provided by a third-party vendor, you may upload those secrets to AWS Secrets Manager and update your config repo to access them by providing the secrets (as *plaintext* Kubernetes secrets) on the standard input of `tpctl`:
-
-```bash
-tpctl upsert_plaintext_secrets
-```
-
-### Add system:master Users
-
-If you have additional `system:master` users to add to your cluster, you may add them to your `values.yaml` file and run this command to install them in your cluster:
-
-```bash
-tpctl install_users
-```
-
-This operation is not idempotent. Any users listed will be added to the existing set of users. So, only run this if you are adding new system master users.
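-
-Under the hood, this runs `eksctl create iamidentitymapping` once per user listed under `aws.iamUsers` in your `values.yaml` (see the `make_users` function in `tpctl.sh` below). A sketch of the equivalent manual command, using the illustrative account number, cluster name, and user name that appear elsewhere in this README:
-
-```bash
-# tpctl passes the IAM user ARN via --role and adds the user to system:masters
-eksctl create iamidentitymapping \
-  --region=us-west-2 \
-  --name=test1 \
-  --group=system:masters \
-  --role=arn:aws:iam::118346523422:user/derrickburns-cli \
-  --username=derrickburns-cli
-```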
-
-You may inspect the existing set of users with:
-```
-kubectl describe configmap -n kube-system aws-auth
-```
-
-Here is example output:
-
-```bash
-$ kubectl describe configmap -n kube-system aws-auth
-Name:         aws-auth
-Namespace:    kube-system
-Labels:       <none>
-Annotations:  <none>
-
-Data
-====
-mapRoles:
-----
-- groups:
-  - system:bootstrappers
-  - system:nodes
-  rolearn: arn:aws:iam::118346523422:role/eksctl-qatest-nodegroup-ng-1-NodeInstanceRole-1L2G21MV64ISS
-  username: system:node:{{EC2PrivateDNSName}}
-- groups:
-  - system:bootstrappers
-  - system:nodes
-  rolearn: arn:aws:iam::118346523422:role/eksctl-qatest-nodegroup-ng-kiam-NodeInstanceRole-1TKZB1U4OVJDW
-  username: system:node:{{EC2PrivateDNSName}}
-- groups:
-  - system:masters
-  rolearn: arn:aws:iam::118346523422:user/lennartgoedhart-cli
-  username: lennartgoedhart-cli
-- groups:
-  - system:masters
-  rolearn: arn:aws:iam::118346523422:user/benderr-cli
-  username: benderr-cli
-- groups:
-  - system:masters
-  rolearn: arn:aws:iam::118346523422:user/derrick-cli
-  username: derrick-cli
-- groups:
-  - system:masters
-  rolearn: arn:aws:iam::118346523422:user/mikeallgeier-cli
-  username: mikeallgeier-cli
-```
-
-### Upload Deploy Key To GitHub Config Repo
-
-In order to manipulate your GitHub config repo, Flux needs to be authorized to do so. This authorization step is normally performed when `flux` is installed with `tpctl flux`.
-Should you delete and reinstall Flux manually, it will create a new public key that you must provide to your GitHub repo in order to authenticate Flux and authorize it to modify the repo. You do that with:
-
-```bash
-tpctl deploy_key
-```
-
-You may inspect your GitHub config repo to see that the key was deployed by going to the `Settings` tab of the config repo and looking under `Deploy Keys`.
-
-### Initiate Deletion of Your AWS EKS Cluster
-
-If you wish to delete an AWS EKS cluster that you created with `tpctl`, you may do so with:
-
-```bash
-tpctl delete_cluster
-```
-
-Note that this only starts the process. The command returns *before* the process has completed.
-The entire process may take up to 20 minutes.
-
-### Await Completion Of Deletion Of Your AWS EKS Cluster
-
-To await the completion of the deletion of an AWS EKS cluster, you may do this:
-
-```bash
-tpctl await_deletion
-```
-
-### Merge/Copy the KUBECONFIG Into Your Local $KUBECONFIG File
-
-You may change which cluster `kubectl` accesses by changing the file that it uses to access your cluster or by changing that file's contents. That file is identified in the environment variable `KUBECONFIG`.
-
-If you are only managing a single cluster, then you can simply set that environment variable to point to that file.
-
-However, in the common case that you are manipulating several clusters, it may be inconvenient to change that environment variable every time you want to switch clusters.
-
-To address this common case, a single `KUBECONFIG` file may contain the information needed to access multiple clusters. It also contains an indication of *which* of those clusters to access.
-The latter indicator may be easily modified with the `kubectx` command.
-
-We store a `KUBECONFIG` file in your config repo that only contains the info needed for the associated cluster.
-
-You may merge the `KUBECONFIG` file from your config repo into a local `KUBECONFIG` file called `~/.kube/config` using:
-
-```bash
-tpctl merge_kubeconfig
-```
-Then, you may use `kubectx` to select which cluster to modify.
-
-### Open the Gloo Dashboard
-
-We use the Gloo API Gateway.
-If you would like to see the gateways, virtual services, and/or routes that are installed, you may use this command to open a web page to the Gloo dashboard:
-
-```bash
-tpctl gloo_dashboard
-```
-
-### Open the Service Mesh Dashboard
-
-If you have installed a service mesh, you may view a dashboard to monitor traffic in a web page:
-
-```bash
-tpctl linkerd_dashboard
-```
-
-### Create Managed Policies
-
-This creates the AWS IAM managed policy used for secrets management (normally created for you as part of `tpctl cluster`):
-
-```bash
-tpctl managed_policies
-```
-
-### Show Recent git diff
-
-If you would like to see the most recent changes to your config repo, you may use standard Git tools, or you may simply run:
-
-```bash
-tpctl diff
-```
-
-## Inside The values.yaml File
-
-Your primary configuration file, `values.yaml`, contains all the information needed to create your Kubernetes cluster and its services. Here is an annotated example:
-
-### GitHub Config
-
-This section establishes where the GitHub repo is located.
-```yaml
-github:
-  git: git@github.com:tidepool-org/cluster-test1
-  https: https://github.com/tidepool-org/cluster-test1
-```
-
-### Logging Config
-This section provides the default log level for the services that run in the
-cluster.
-
-```yaml
-logLevel: debug # the default log level for all services
-```
-
-### Cluster Administration Configuration
-This section provides an email address for the administrator of the cluster.
-```yaml
-email: derrick@tidepool.org # cluster admin email address
-```
-
-### AWS Configuration
-This section provides the AWS account number and the IAM users who are to
-be granted `system:master` privileges on the cluster:
-
-```yaml
-aws:
-  accountNumber: 118346523422 # AWS account number
-  iamUsers: # AWS IAM users who will be granted system:master privileges to the cluster
-    - derrickburns-cli
-    - lennartgoedhard-cli
-    - benderr-cli
-    - jamesraby-cli
-    - haroldbernard-cli
-```
-
-### Kubectl Access Configuration
-This section provides the default location of the Kubernetes cluster configuration file.
-
-```yaml
-kubeconfig: "$HOME/.kube/config" # place to put KUBECONFIG
-```
-### Cluster Provisioning Configuration
-This section provides a description of the AWS cluster itself, including its
-name, region, size, networking config, and IAM policies.
-```yaml
-cluster:
-  metadata:
-    name: test1 # name of the cluster
-    region: us-west-2 # AWS region to host the cluster
-  cloudWatch: # AWS CloudWatch configuration
-    clusterLogging:
-      enableTypes: # Types of log messages to persist to CloudWatch
-        - authenticator
-        - api
-        - controllerManager
-        - scheduler
-  vpc: # Amazon VPC configuration
-    cidr: "10.47.0.0/16" # CIDR of AWS VPC
-  nodeGroups:
-    - instanceType: "m5.large" # AWS instance type for workers
-      desiredCapacity: 4 # initial capacity of auto scaling group of workers
-      minSize: 1 # minimum size of auto scaling group of workers
-      maxSize: 10 # maximum size of auto scaling group of workers
-      name: ng
-      iam: # AWS IAM policies to attach to the nodes in the cluster
-        attachPolicyARNs:
-          - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
-          - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
-          - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess
-        withAddonPolicies:
-          autoScaler: true
-          certManager: true
-          externalDNS: true
-```
-
-### Optional Service Configuration
-
-There are a number of services that can be installed by `tpctl` to run in your
-Kubernetes cluster.
-This section allows you to select the services
-that you want to enable:
-```yaml
-pkgs:
-  amazon-cloudwatch: # AWS CloudWatch logging
-    enabled: true
-  external-dns: # External DNS maintains DNS aliases to Amazon ELBs
-    enabled: true
-  gloo: # Gloo provides the API Gateway
-    enabled: true
-  gloo-crds: # Gloo CRDs define the Custom Resource Definitions
-    enabled: true
-  prometheus-operator: # Prometheus Operator creates Prometheus instances
-    enabled: true
-  certmanager: # Certmanager issues TLS certificates
-    enabled: true
-  cluster-autoscaler: # Cluster autoscaler scales the nodes in the cluster as needed
-    enabled: true
-  external-secrets: # External secrets loads persisted secrets from AWS Secrets Manager
-    enabled: true
-  reloader: # Reloader restarts services on secrets/configmap changes
-    enabled: true
-  datadog: # Datadog sends telemetry to the hosted Datadog service
-    enabled: false
-  flux: # Flux provides GitOps
-    enabled: true
-  fluxcloud: # Fluxcloud sends GitOps notifications to Slack
-    enabled: false
-    username: "derrickburns"
-    secret: slack # Name of secret in which Slack webhook URL is provided
-    #channel: foo # Slack channel on which to post notifications
-  metrics-server: # Metrics server collects node-level metrics and sends them to Prometheus
-    enabled: true
-  sumologic: # Sumologic collects metrics and logs and sends them to the hosted service
-    enabled: false
-  thanos: # Thanos aggregates telemetry from all Tidepool clusters
-    enabled: true
-    bucket: tidepool-thanos # Writable S3 bucket in which to aggregate multi-cluster telemetry data
-    secret: thanos-objstore-config # Name of Kubernetes secret in which Thanos config is stored.
-```
-
-### Tidepool Environment Configuration
-The last section allows you to configure the Tidepool environments that you run in your cluster.
-
-```yaml
-environments:
-  qa2:
-    mongodb:
-      enabled: true # Whether to use an embedded mongodb
-    tidepool:
-      source: stg # Where to get initial secrets from
-      enabled: true
-      hpa: # Whether to implement horizontal pod scalers for each service
-        enabled: true
-      nosqlclient: # Whether to deploy a nosqlclient to query Mongo data
-        enabled: true
-      mongodb:
-        enabled: true
-      gitops:
-        branch: develop # Which branch to use for automatic image updates
-      buckets: {} # Which S3 buckets to store/retrieve data to/from
-        #data: tidepool-test-qa2-data # Name of the writable S3 bucket to store document data to
-        #asset: tidepool-test-qa2-asset # Name of the readable S3 bucket from which to get email assets
-      certificate:
-        secret: tls # Name of the K8s secret to store the TLS certificate for the hosts served
-        issuer: letsencrypt-staging # Name of the Certificate Issuer to use
-      gateway:
-        default: # Default protocol to use for communication for email verification
-          protocol: http
-        http:
-          enabled: true # Whether to offer HTTP access
-          dnsNames: # DNS names of the HTTP hosts to serve
-            - localhost
-        https:
-          enabled: false # Whether to offer HTTPS access
-          dnsNames: # DNS names of the HTTPS hosts to serve
-            - qa2.tidepool.org
-```
diff --git a/cmd/agent.sh b/cmd/agent.sh
deleted file mode 100755
index 4b7ab5c0b..000000000
--- a/cmd/agent.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-#
-# Add ssh key to ssh-agent running in a Docker container.
-# -# Usage: $0 [${SSH_KEY:-id_rsa}] -# - -SSH_KEY=${1:-id_rsa} -docker run -d --name=ssh-agent nardeas/ssh-agent -docker run --rm --volumes-from=ssh-agent -v ~/.ssh:/.ssh -it nardeas/ssh-agent ssh-add /root/.ssh/${SSH_KEY} - diff --git a/cmd/build.sh b/cmd/build.sh deleted file mode 100755 index 7aadcd040..000000000 --- a/cmd/build.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -DIR=$(dirname $0) -cd $DIR -docker build -t tidepool/tpctl . diff --git a/cmd/external_secret b/cmd/external_secret deleted file mode 100755 index 2d834d7f8..000000000 --- a/cmd/external_secret +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/local/bin/python3 -# -# Create/update/delete secrets managed in Amazon External Secrets -# -# Usage: $0 file create|update|upsert|delete|dryrun ${CLUSTER_NAME} -# encoded|plaintext - -import base64 -import boto3 -import json -import os -import sys -import yaml - -if len(sys.argv) != 4: - print("Usage: external_secret create|update|upsert|delete|dryrun cluster encoded|plaintext < secrets_file") - exit() - -op = sys.argv[1] -cluster = sys.argv[2] -encoded = sys.argv[3] - -input = sys.stdin -docs = yaml.safe_load_all(input) -for manifest in docs: - if not manifest or "metadata" not in manifest: - continue - env = manifest["metadata"]["namespace"] - base = manifest["metadata"]["name"] - data = manifest["data"] - - secret = dict() - secret["apiVersion"] = "kubernetes-client.io/v1" - secret["kind"] = "ExternalSecret" - secret["metadata"] = manifest["metadata"] - secret["secretDescriptor"] = dict() - secret["secretDescriptor"]["backendType"] = "secretsManager" - secret["secretDescriptor"]["data"] = list() - - values = list() - awsvalue = dict() - client = boto3.client('secretsmanager') - key = cluster + "/" + env + "/" + base - - for name, value in data.items(): - if encoded == "encoded" or encoded == "true": - decoded = base64.standard_b64decode(value).decode("utf-8") - else: - decoded = value - value = dict() - value["key"] = key - value["name"] = name - value["property"] = name - awsvalue[value["name"]] = decoded - values.append(value) - - secret["secretDescriptor"]["data"] = values - print("---") - print(yaml.dump(secret)) - out = json.dumps(awsvalue) - - if op == "upsert": - try: - result = client.describe_secret(SecretId=key) - client.update_secret(SecretId=key, SecretString=out) - except: - client.create_secret(Name=key, SecretString=out) - elif op == "create": - client.create_secret(Name=key, SecretString=out) - elif op == "update": - client.update_secret(SecretId=key, SecretString=out) - elif op == "delete": - client.delete_secret(SecretId=key) - elif op == "dryrun": - pass - else: - print("unknown operation", op) diff --git a/cmd/push.sh b/cmd/push.sh deleted file mode 100755 index 4f7a423cc..000000000 --- a/cmd/push.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -docker push tidepool/tpctl diff --git a/cmd/separate_files b/cmd/separate_files deleted file mode 100755 index 8814a49f1..000000000 --- a/cmd/separate_files +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/python3 -# -# Generate separate files (into the current directory) from a YAML map whose keys are the file names and the values are file contents -# Optionally add a header and a footer -# -# Usagte: $0 ${HEADER} ${FOOTER} -# - -import yaml -import os -import sys -import re -import errno - -input = yaml.safe_load_all(sys.stdin) - - -def convert(name): - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() - - -def mkdir_p(path): - try: - os.makedirs(path) - except OSError as exc: 
# Python >2.5 - if exc.errno == errno.EEXIST and os.path.isdir(path): - pass - else: - raise - -for value in input: - if value != None and "kind" in value: - path = "%s/%s" % (convert(value["metadata"]["namespace"]) - if "namespace" in value["metadata"] else "global", value["kind"]) - name = "%s.yaml" % value["metadata"]["name"] - mkdir_p(path) - print(path + "/" + name) - with open(path + "/" + name, "w") as out: - yaml.dump(value, out) - out.flush() diff --git a/cmd/tpctl b/cmd/tpctl deleted file mode 100755 index 8b4bc1976..000000000 --- a/cmd/tpctl +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -# set defaults -HELM_HOME=${HELM_HOME:-~/.helm} -KUBE_CONFIG=${KUBECONFIG:-~/.kube/config} -AWS_CONFIG=${AWS_CONFIG:-~/.aws} -GIT_CONFIG=${GIT_CONFIG:-~/.gitconfig} -SSH_KEY=${SSH_KEY:-id_rsa} -AGENT_CONTAINER="" - -function shutdown_agent { - docker kill ssh-agent >/dev/null - docker rm $AGENT_CONTAINER >/dev/null -} - -running=$(docker inspect -f '{{.State.Running}}' ssh-agent 2>/dev/null) -if [ $? -ne 0 -o "$running" == "false" ] -then - AGENT_CONTAINER=$(docker run -d --name=ssh-agent nardeas/ssh-agent) - docker run --rm --volumes-from=ssh-agent -v ~/.ssh:/.ssh -it nardeas/ssh-agent ssh-add /root/.ssh/${SSH_KEY} - trap shutdown_agent EXIT -fi - -if [ -z "$GITHUB_TOKEN" ] -then - echo "GITHUB_TOKEN with repo scope is needed." - exit 1 -fi - -mkdir -p $HELM_HOME -if [ ! -f "$KUBE_CONFIG" ] -then - touch $KUBE_CONFIG -fi - -docker run -it \ ---volumes-from=ssh-agent \ --e SSH_AUTH_SOCK=/.ssh-agent/socket \ --e GITHUB_TOKEN=${GITHUB_TOKEN} \ --e REMOTE_REPO=${REMOTE_REPO} \ --v ${GIT_CONFIG}:/root/.gitconfig:ro \ --v ${HELM_HOME}:/root/.helm \ --v ${AWS_CONFIG}:/root/.aws:ro \ --v ${KUBE_CONFIG}:/root/.kube/config \ -tidepool/tpctl /root/tpctl $* - diff --git a/cmd/tpctl.sh b/cmd/tpctl.sh deleted file mode 100755 index 3d98b26c4..000000000 --- a/cmd/tpctl.sh +++ /dev/null @@ -1,1358 +0,0 @@ -#!/bin/bash -ix -# -# Configure EKS cluster to run Tidepool services -# - -set -o pipefail - -function cluster_in_context { - context=$(KUBECONFIG=$(get_kubeconfig) kubectl config current-context 2>/dev/null) - if [ $? 
-eq 0 ] - then - echo $context - else - echo "none" - fi -} - -function make_envrc { - local context=$(get_context) - context=$(yq r kubeconfig.yaml current-context) - echo "kubectx $context" >.envrc - add_file ".envrc" -} - -function cluster_in_repo { - yq r kubeconfig.yaml -j current-context | sed -e 's/"//g' -e "s/'//g" -} - -function get_sumo_accessID { - echo $1 | jq '.accessID' | sed -e 's/"//g' -} - -function get_sumo_accessKey { - echo $1 | jq '.accessKey' | sed -e 's/"//g' -} - -function install_sumo { - start "installing sumo" - local config=$(get_config) - local cluster=$(get_cluster) - local namespace=$(require_value "pkgs.sumologic.namespace") - local apiEndpoint=$(require_value "pkgs.sumologic.apiEndpoint") - local sumoSecret=$(aws secretsmanager get-secret-value --secret-id $cluster/$namespace/sumologic | jq '.SecretString | fromjson') - local accessID=$(get_sumo_accessID $sumoSecret) - local accessKey=$(get_sumo_accessKey $sumoSecret) - curl -s https://raw.githubusercontent.com/SumoLogic/sumologic-kubernetes-collection/master/deploy/docker/setup/setup.sh \ - | bash -s - -k $cluster -n $namespace -d false $apiEndpoint $accessID $accessKey > pkgs/sumologic/sumologic.yaml - complete "installed sumo" -} - - -function add_gloo_manifest { - config=$1 - file=$2 - (cd gloo; \ - jsonnet --tla-code config="$config" $TEMPLATE_DIR/gloo/${file}.yaml.jsonnet | separate_files | add_names; \ - expect_success "Templating failure gloo/$1.yaml.jsonnet") -} - -# install gloo -function install_gloo { - start "installing gloo" - local config=$(get_config) - jsonnet --tla-code config="$config" $TEMPLATE_DIR/gloo/gloo-values.yaml.jsonnet | yq r - > $TMP_DIR/gloo-values.yaml - expect_success "Templating failure gloo/gloo-values.yaml.jsonnet" - - rm -rf gloo - mkdir -p gloo - (cd gloo; glooctl install gateway -n gloo-system --values $TMP_DIR/gloo-values.yaml --dry-run | separate_files | add_names) - expect_success "Templating failure gloo/gloo-values.yaml.jsonnet" - add_gloo_manifest "$config" gateway-ssl - add_gloo_manifest "$config" gateway - add_gloo_manifest "$config" settings - - glooctl install gateway -n gloo-system --values $TMP_DIR/gloo-values.yaml - expect_success "Gloo installation failure" - completed "installed gloo" -} - -function confirm_matching_cluster { - local in_context=$(cluster_in_context) - local in_repo=$(cluster_in_repo) - if [ "${in_repo}" != "${in_context}" ] - then - echo "${in_context} is cluster selected in KUBECONFIG config file" - echo "${in_repo} is cluster named in $REMOTE_REPO repo" - confirm "Is $REMOTE_REPO the repo you want to use? " - fi -} - -function establish_ssh { - ssh-add -l &>/dev/null - if [ "$?" == 2 ]; then - # Could not open a connection to your authentication agent. - - # Load stored agent connection info. - test -r ~/.ssh-agent && \ - eval "$(<~/.ssh-agent)" >/dev/null - - ssh-add -l &>/dev/null - if [ "$?" == 2 ]; then - # Start agent and store agent connection info. - (umask 066; ssh-agent > ~/.ssh-agent) - eval "$(<~/.ssh-agent)" >/dev/null - fi - fi - - # Load identities - ssh-add -l &>/dev/null - if [ "$?" == 1 ]; then - # The agent has no identities. - # Time to add one. - ssh-add -t 4h - fi -} - -# set up colors to use for output -function define_colors { - RED=`tput setaf 1` - GREEN=`tput setaf 2` - MAGENTA=`tput setaf 5` - RESET=`tput sgr0` -} - -# irrecoverable error. Show message and exit. 
-function panic { - echo "${RED}[✖] ${1}${RESET}" - exit 1 -} - -# confirm that previous command succeeded, otherwise panic with message -function expect_success { - if [ $? -ne 0 ] - then - panic "$1" - fi -} - -# show info message -function start { - echo "${GREEN}[i] ${1}${RESET}" -} - -# show info message -function complete { - echo "${MAGENTA}[√] ${1}${RESET}" -} - -# show info message -function info { - echo "${MAGENTA}[√] ${1}${RESET}" -} - -# report that file is being added to config repo -function add_file { - echo "${GREEN}[ℹ] adding ${1}${RESET}" -} - -# report all files added to config repo from list given in stdin -function add_names { - while read -r line - do - add_file $line - done -} - -# report renaming of file in config repo -function rename_file { - echo "${GREEN}[√] renaming ${1} ${2}${RESET}" -} - -# conform action, else exit -function confirm { - if [ "$APPROVE" != "true" ] - then - local msg=$1 - read -p "${RED}$msg${RESET} " -n 1 -r - if [[ ! $REPLY =~ ^[Yy]$ ]] - then - exit 1 - else - echo - fi - fi -} - -# require that REMOTE_REPO env variable exists, expand REMOTE_REPO into full name -function check_remote_repo { - - if [ -z "$REMOTE_REPO" ] - then - panic "must provide REMOTE_REPO" - fi - - if [[ $REMOTE_REPO != */* ]] - then - GIT_REMOTE_REPO="git@github.com:tidepool-org/$REMOTE_REPO" - else - GIT_REMOTE_REPO=$REMOTE_REPO - fi - HTTPS_REMOTE_REPO=$(echo $GIT_REMOTE_REPO | sed -e "s#git@github.com:#https://github.com/#") - -} - -# clean up all temporary files -function cleanup { - if [ -f "$TMP_DIR" ] - then - cd / - rm -rf $TMP_DIR - fi -} - - -# create temporary workspace to clone Git repos into, change to that directory -function setup_tmpdir { - if [[ ! -d $TMP_DIR ]]; then - start "creating temporary working directory" - TMP_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'TMP_DIR'` - complete "created temporary working directory" - trap cleanup EXIT - cd $TMP_DIR - fi -} - -function repo_with_token { - local repo=$1 - echo $repo | sed -e "s#https://#https://$GITHUB_TOKEN@#" -} - - -# clone config repo, change to that directory -function clone_remote { - cd $TMP_DIR - if [[ ! -d $(basename $HTTPS_REMOTE_REPO) ]]; then - start "cloning remote" - git clone $(repo_with_token $HTTPS_REMOTE_REPO) - expect_success "Cannot clone $HTTPS_REMOTE_REPO" - complete "cloned remote" - fi - cd $(basename $HTTPS_REMOTE_REPO) -} - -# clone quickstart repo, export TEMPLATE_DIR -function set_template_dir { - if [[ ! -d $TEMPLATE_DIR ]]; then - start "cloning quickstart" - pushd $TMP_DIR >/dev/null 2>&1 - git clone $(repo_with_token https://github.com/tidepool-org/eks-template) - export TEMPLATE_DIR=$(pwd)/eks-template - popd >/dev/null 2>&1 - complete "cloned quickstart" - fi -} - -# clone development repo, exports DEV_DIR and CHART_DIR -function set_tools_dir { - if [[ ! -d $CHART_DIR ]]; then - start "cloning development tools" - pushd $TMP_DIR >/dev/null 2>&1 - git clone $(repo_with_token https://github.com/tidepool-org/development) - cd development - git checkout develop - DEV_DIR=$(pwd) - CHART_DIR=${DEV_DIR}/charts/tidepool/0.1.7 - popd >/dev/null 2>&1 - complete "cloned development tools" - fi -} - -# clone secret-map repo, export SM_DIR -function clone_secret_map { - if [[ ! 
-d $SM_DIR ]]; then - start "cloning secret-map" - pushd $TMP_DIR >/dev/null 2>&1 - git clone $(repo_with_token https://github.com/tidepool-org/secret-map) - SM_DIR=$(pwd)/secret-map - popd >/dev/null 2>&1 - complete "cloned secret-map" - fi -} - -# get values file -function get_config { - yq r values.yaml -j -} - -# retrieve value from values file, or exit if it is not available -function require_value { - local val=$(yq r values.yaml -j $1 | sed -e 's/"//g' -e "s/'//g") - if [ $? -ne 0 -o "$val" == "null" -o "$val" == "" ] - then - panic "Missing $1 from values.yaml file." - fi - echo $val -} - -# retrieve name of cluster -function get_cluster { - require_value "cluster.metadata.name" -} - -# retrieve name of region -function get_region { - require_value "cluster.metadata.region" -} - -# retrieve email address of cluster admin -function get_email { - require_value "email" -} - -# retrieve AWS account number -function get_aws_account { - require_value "aws.accountNumber" -} - -# retrieve list of AWS environments to create -function get_environments { - yq r values.yaml environments | sed -e "/^ .*/d" -e s/:.*// -} - -# retrieve list of K8s system masters -function get_iam_users { - yq r values.yaml aws.iamUsers | sed -e "s/- //" -e 's/"//g' -} - -# retrieve bucket name or create from convention -function get_bucket { - local env=$1 - local kind=$2 - local bucket=$(yq r values.yaml environments.${env}.tidepool.buckets.${kind} | sed -e "/^ .*/d" -e s/:.*//) - if [ "$bucket" == "null" ] - then - local cluster=$(get_cluster) - echo "tidepool-${cluster}-${env}-${kind}" - else - echo $bucket - fi -} - -# create Tidepool assets bucket -function make_assets { - local env - for env in $(get_environments) - do - local bucket=$(get_bucket $env asset) - start "creating asset bucket $bucket" - aws s3 mb s3://$bucket - info "copying dev assets into $bucket" - aws s3 cp s3://tidepool-dev-asset s3://$bucket - complete "created asset bucket $bucket" - done -} - -# retrieve helm home -function get_helm_home { - echo ${HELM_HOME:-~/.helm} -} - -# make TLS certificate to allow local helm client to access tiller with TLS -function make_cert { - local cluster=$(get_cluster) - local helm_home=$(get_helm_home) - - start "installing helm client cert for cluster $cluster" - - info "retrieving ca.pem from AWS secrets manager" - aws secretsmanager get-secret-value --secret-id $cluster/flux/ca.pem | jq '.SecretString' | sed -e 's/"//g' \ --e 's/\\n/\ -/g' >$TMP_DIR/ca.pem - - expect_success "failed to retrieve ca.pem from AWS secrets manager" - - info "retrieving ca-key.pem from AWS secrets manager" - aws secretsmanager get-secret-value --secret-id $cluster/flux/ca-key.pem | jq '.SecretString' | sed -e 's/"//g' \ --e 's/\\n/\ -/g' >$TMP_DIR/ca-key.pem - - expect_success "failed to retrieve ca-key.pem from AWS secrets manager" - - local helm_cluster_home=${helm_home}/clusters/$cluster - - info "creating cert in ${helm_cluster_home}" - local tiller_hostname=tiller-deploy.flux - local user_name=helm-client - - echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","server auth","client auth"]}}}' > $TMP_DIR/ca-config.json - echo '{"CN":"'$user_name'","hosts":[""],"key":{"algo":"rsa","size":4096}}' | cfssl gencert \ - -config=$TMP_DIR/ca-config.json -ca=$TMP_DIR/ca.pem -ca-key=$TMP_DIR/ca-key.pem \ - -hostname="$tiller_hostname" - | cfssljson -bare $user_name - - rm -rf $helm_cluster_home - mkdir -p $helm_cluster_home - mv helm-client.pem $helm_cluster_home/cert.pem - add_file 
$helm_cluster_home/cert.pem - mv helm-client-key.pem $helm_cluster_home/key.pem - rm helm-client.csr - add_file $helm_cluster_home/key.pem - cp $TMP_DIR/ca.pem $helm_cluster_home/ca.pem - add_file $helm_cluster_home/ca.pem - rm -f $helm_home/{cert.pem,key.pem,ca.pem} - cp $helm_cluster_home/{cert.pem,key.pem,ca.pem} $helm_home - - if [ "$TILLER_NAMESPACE" != "flux" -o "$HELM_TLS_ENABLE" != "true" ] - then - info "you must do this to use helm:" - info "export TILLER_NAMESPACE=flux" - info "export HELM_TLS_ENABLE=true" - fi - complete "installed helm client cert for cluster $cluster" -} - -# config availability of GITHUB TOKEN in environment -function expect_github_token { - if [ -z "$GITHUB_TOKEN" ] - then - panic "\$GITHUB_TOKEN required. https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line" - fi -} - -# retrieve kubeconfig value -function get_kubeconfig { - local kc=$(require_value "kubeconfig") - realpath $(eval "echo $kc") -} - -# create EKS cluster using config.yaml file, add kubeconfig to config repo -function make_cluster { - local cluster=$(get_cluster) - start "creating cluster $cluster" - eksctl create cluster -f config.yaml --kubeconfig ./kubeconfig.yaml - expect_success "eksctl create cluster failed." - git pull - add_file "./kubeconfig.yaml" - make_envrc - complete "created cluster $cluster" -} - -function merge_kubeconfig { - local local_kube_config=$(realpath ./kubeconfig.yaml) - local kubeconfig=$(get_kubeconfig) - if [ "$kubeconfig" != "$local_kube_config" ] - then - if [ -f "$kubeconfig" ] - then - info "merging kubeconfig into $kubeconfig" - KUBECONFIG=$kubeconfig:$local_kube_config kubectl config view --flatten >$TMP_DIR/updated.yaml - cat $TMP_DIR/updated.yaml > $kubeconfig - else - mkdir -p $(dirname $kubeconfig) - info "creating new $kubeconfig" - cat $local_kube_config > $kubeconfig - fi - fi -} - -# confirm that values file exists or panic -function expect_values_exists { - if [ ! -f values.yaml ] - then - panic "No values.yaml file." - fi -} - -# remove computed pkgs -function reset_config_dir { - mv values.yaml $TMP_DIR/ - if [ $(ls | wc -l) -ne 0 ] - then - confirm "Are you sure that you want to remove prior contents (except values.yaml)?" - info "resetting config repo" - rm -rf pkgs - fi - mv $TMP_DIR/values.yaml . 
-} - -# return list of enabled packages -function enabled_pkgs { - local pkgs="" - local directory=$1 - local key=$2 - for dir in $(ls $directory) - do - local pkg=$(basename $dir) - local enabled=$(yq r values.yaml $key.${pkg}.enabled) - if [ "$enabled" == "true" ] - then - pkgs="${pkgs} $pkg" - fi - done - echo $pkgs -} - -# make K8s manifest file for shared services given config, path to directory, and prefix to strip -function template_files { - local config=$1 - local path=$2 - local prefix=$3 - local fullpath - local cluster=$(get_cluster) - local region=$(get_region) - for fullpath in $(find $path -type f -print) - do - local filename=${fullpath#$prefix} - mkdir -p $(dirname $filename) - if [ "${filename: -5}" == ".yaml" ] - then - add_file $filename - cp $fullpath $filename - elif [ "${filename: -8}" == ".jsonnet" ] - then - add_file ${filename%.jsonnet} - jsonnet --tla-code config="$config" $fullpath | yq r - > ${filename%.jsonnet} - expect_success "Templating failure $filename" - fi - done -} - -# make K8s manifest files for shared services -function make_shared_config { - start "creating package manifests" - local config=$(get_config) - rm -rf pkgs - local dir - for dir in $(enabled_pkgs $TEMPLATE_DIR/pkgs pkgs) - do - template_files "$config" $TEMPLATE_DIR/pkgs/$dir $TEMPLATE_DIR/ - done - complete "created package manifests" -} - -# make EKSCTL manifest file -function make_cluster_config { - local config=$(get_config) - start "creating eksctl manifest" - add_file "config.yaml" - jsonnet --tla-code config="$config" ${TEMPLATE_DIR}/eksctl/cluster_config.jsonnet | yq r - > config.yaml - expect_success "Templating failure eksctl/cluster_config.jsonnet" - complete "created eksctl manifest" -} - -# make K8s manifests for enviroments given config, path, prefix, and environment name -function environment_template_files { - local config=$1 - local path=$2 - local prefix=$3 - local env=$4 - for fullpath in $(find $path -type f -print) - do - local filename=${fullpath#$prefix} - local dir=environments/$env/$(dirname $filename) - local file=$(basename $filename) - mkdir -p $dir - if [ "${file: -8}" == ".jsonnet" ] - then - local out=$dir/${file%.jsonnet} - local prev=$TMP_DIR/$dir/${file%.jsonnet} - add_file $out - if [ -f $prev ] - then - yq r $prev -j > $TMP_DIR/${file%.jsonnet} - else - echo "{}" > $TMP_DIR/${file%.jsonnet} - fi - jsonnet --tla-code-file prev=$TMP_DIR/${file%.jsonnet} --tla-code config="$config" --tla-str namespace=$env $fullpath | yq r - > $dir/${file%.jsonnet} - expect_success "Templating failure $filename" - rm $TMP_DIR/${file%.jsonnet} - fi - done -} - -# make K8s manifests for environments -function make_environment_config { - local config=$(get_config) - local env - mv environments $TMP_DIR - for env in $(get_environments) - do - start "creating $env environment manifests" - for dir in $(enabled_pkgs $TEMPLATE_DIR/environments environments.$env) - do - environment_template_files "$config" $TEMPLATE_DIR/environments/$dir $TEMPLATE_DIR/environments/ $env - done - complete "created $env environment manifests" - done -} - -# create all K8s manifests and EKSCTL manifest -function make_config { - start "creating manifests" - make_shared_config - make_cluster_config - make_environment_config - complete "created manifests" -} - -# persist changes to config repo in GitHub -function save_changes { - establish_ssh - start "saving changes to config repo" - git add . 
- complete "added changes to config repo" - git commit -m "$1" - complete "committed changes to config repo" - git push - complete "pushed changes to config repo" -} - -# confirm cluster exists or exist -function expect_cluster_exists { - local cluster=$(get_cluster) - eksctl get cluster --name $cluster - expect_success "cluster $cluster does not exist." -} - -# install flux into cluster -function make_flux { - local cluster=$(get_cluster) - local email=$(get_email) - start "installing flux into cluster $cluster" - establish_ssh - rm -rf flux - EKSCTL_EXPERIMENTAL=true unbuffer eksctl install \ - flux -f config.yaml --git-url=${GIT_REMOTE_REPO}.git --git-email=$email --git-label=$cluster | tee $TMP_DIR/eksctl.out - expect_success "eksctl install flux failed." - git pull - complete "installed flux into cluster $cluster" -} - -# save Certificate Authority key and pem into AWS secrets manager -function save_ca { - start "saving certificate authority TLS pem and key to AWS secrets manager" - local cluster=$(get_cluster) - local dir=$(cat $TMP_DIR/eksctl.out | grep "Public key infrastructure" | sed -e 's/^.*"\(.*\)".*$/\1/') - - aws secretsmanager describe-secret --secret-id $cluster/flux/ca.pem 2>/dev/null - if [ $? -ne 0 ] - then - aws secretsmanager create-secret --name $cluster/flux/ca.pem --secret-string "$(cat $dir/ca.pem)" - expect_success "failed to create ca.pem to AWS" - aws secretsmanager create-secret --name $cluster/flux/ca-key.pem --secret-string "$(cat $dir/ca-key.pem)" - expect_success "failed to create ca-key.pem to AWS" - else - aws secretsmanager update-secret --secret-id $cluster/flux/ca.pem --secret-string "$(cat $dir/ca.pem)" - expect_success "failed to update ca.pem to AWS" - aws secretsmanager update-secret --secret-id $cluster/flux/ca-key.pem --secret-string "$(cat $dir/ca-key.pem)" - expect_success "failed to update ca-key.pem to AWS" - fi - complete "saved certificate authority TLS pem and key to AWS secrets manager" -} - -# save deploy key to config repo -function make_key { - start "authorizing access to ${GIT_REMOTE_REPO}" - - local key=$(fluxctl --k8s-fwd-ns=flux identity) - local reponame="$(echo $GIT_REMOTE_REPO | cut -d: -f2 | sed -e 's/\.git//')" - local cluster=$(get_cluster) - - curl -X POST -i\ - -H"Authorization: token $GITHUB_TOKEN"\ - --data @- https://api.github.com/repos/$reponame/keys << EOF - { - - "title" : "flux key for $cluster created by make_flux", - "key" : "$key", - "read_only" : false - } -EOF - complete "authorized access to ${GIT_REMOTE_REPO}" -} - -# update flux and helm operator manifests -function update_flux { - start "updating flux and flux-helm-operator manifests" - local config=$(get_config) - - if [ -f flux/flux-deployment.yaml ] - then - yq r flux/flux-deployment.yaml -j > $TMP_DIR/flux.json - yq r flux/helm-operator-deployment.yaml -j > $TMP_DIR/helm.json - yq r flux/tiller-dep.yaml -j > $TMP_DIR/tiller.json - - jsonnet --tla-code config="$config" --tla-code-file flux="$TMP_DIR/flux.json" --tla-code-file helm="$TMP_DIR/helm.json" $TEMPLATE_DIR/flux/flux.jsonnet >$TMP_DIR/updated.json --tla-code-file tiller="$TMP_DIR/tiller.json" - expect_success "Templating failure flux/flux.jsonnet" - - add_file flux/flux-deployment-updated.yaml - yq r $TMP_DIR/updated.json flux >flux/flux-deployment-updated.yaml - expect_success "Serialization flux/flux-deployment-updated.yaml" - - add_file flux/helm-operator-deployment-updated.yaml - yq r $TMP_DIR/updated.json helm >flux/helm-operator-deployment-updated.yaml - expect_success "Serialization 
flux/helm-operator-deployment-updated.yaml" - - add_file flux/tiller-dep-updated.yaml - yq r $TMP_DIR/updated.json tiller >flux/tiller-dep-updated.yaml - expect_success "Serialization flux/tiller-dep--updated.yaml" - - rename_file flux/flux-deployment.yaml flux/flux-deployment.yaml.orig - mv flux/flux-deployment.yaml flux/flux-deployment.yaml.orig - - rename_file flux/helm-operator-deployment.yaml flux/helm-operator-deployment.yaml.orig - mv flux/helm-operator-deployment.yaml flux/helm-operator-deployment.yaml.orig - - rename_file flux/tiller-dep.yaml flux/tiller-dep.yaml.orig - mv flux/tiller-dep.yaml flux/tiller-dep.yaml.orig - fi - complete "updated flux and flux-helm-operator manifests" -} - -function mykubectl { - KUBECONFIG=~/.kube/config kubectl $@ -} - -# create service mesh -function make_mesh { - linkerd check --pre - expect_success "Failed linkerd pre-check." - start "installing mesh" - info "linkerd check --pre" - - rm -rf linkerd - mkdir -p linkerd - add_file "linkerd/linkerd-config.yaml" - (cd linkerd; linkerd install config | separate_files | add_names) - linkerd install config | mykubectl apply -f - - - linkerd check config - while [ $? -ne 0 ] - do - sleep 3 - info "retrying linkerd check config" - linkerd check config - done - info "linkerd check config" - - add_file "linkerd/linkerd-control-plane.yaml" - (cd linkerd; linkerd install control-plane | separate_files | add_names) - linkerd install control-plane | mykubectl apply -f - - - linkerd check - while [ $? -ne 0 ] - do - sleep 3 - info "retrying linkerd check" - linkerd check - done - complete "installed mesh" -} - -# get secrets from legacy environments if requested -function get_secrets { - local cluster=$(get_cluster) - local env - for env in $(get_environments) - do - local source=$(yq r values.yaml environments.${env}.tidepool.source) - if [ "$source" == "null" -o "$source" == "" ] - then - continue - fi - if [ "$source" == "dev" -o "$source" == "stg" -o "$source" == "int" -o "$source" == "prd" ] - then - $SM_DIR/bin/git_to_map $source | $SM_DIR/bin/map_to_k8s $env - else - panic "Unknown secret source $source" - fi - done -} - -# create k8s system master users -function make_users { - local group=system:masters - local cluster=$(get_cluster) - local aws_region=$(get_region) - local account=$(get_aws_account) - - start "adding system masters" - local user - for user in $(get_iam_users) - do - local arn=arn:aws:iam::${account}:user/${user} - eksctl create iamidentitymapping --region=$aws_region --role=$arn --group=$group --name=$cluster --username=$user - while [ $? -ne 0 ] - do - sleep 3 - eksctl create iamidentitymapping --region=$aws_region --role=$arn --group=$group --name=$cluster --username=$user - info "retrying eksctl create iamidentitymapping" - done - info "added $user to cluster $cluster" - done - complete "added system masters" -} - - -# confirm that values.yaml file exists -function expect_values_not_exist { - if [ -f values.yaml ] - then - confirm "Are you sure that you want to overwrite prior contents of values.yaml?" - fi -} - -# create initial values file -function make_values { - start "creating values.yaml" - add_file "values.yaml" - cat $TMP_DIR/eks-template/values.yaml >values.yaml - cat >>values.yaml < xxx.yaml - mv xxx.yaml values.yaml - if [ "$APPROVE" != "true" ] - then - ${EDITOR:-vi} values.yaml - fi - complete "created values.yaml" -} - -# enter into bash to allow manual editing of config repo -function edit_config { - info "exit shell when done making changes." 
- bash - confirm "Are you sure you want to commit changes?" -} - -# show recent diff -function diff_config { - git diff HEAD~1 -} - -# edit values file -function edit_values { - if [ -f values.yaml ] - then - info "editing values file for repo $GIT_REMOTE_REPO" - ${EDITOR:-vi} values.yaml - else - panic "values.yaml does not exist." - fi -} - - -# generate random secrets -function randomize_secrets { - local env - for env in $(get_environments) - do - local file - for file in $(find $CHART_DIR -name \*secret.yaml -print) - do - helm template --namespace $env --set global.secret.generated=true $CHART_DIR -f $CHART_DIR/values.yaml -x $file >$TMP_DIR/x - grep "kind" $TMP_DIR/x >/dev/null 2>&1 - if [ $? -eq 0 ] - then - cat $TMP_DIR/x - fi - rm $TMP_DIR/x - done - done -} - -# delete cluster from EKS, including cloudformation templates -function delete_cluster { - cluster=$(get_cluster) - confirm "Are you sure that you want to delete cluster $cluster?" - start "deleting cluster $cluster" - eksctl delete cluster --name=$cluster - expect_success "Cluster deletion failed." - info "cluster $cluster deletion takes ~10 minutes to complete" -} - -# remove service mesh from cluster and config repo -function remove_mesh { - start "removing linkerd" - linkerd install --ignore-cluster | mykubectl delete -f - - rm -rf linkerd - complete "removed linkerd" -} - -function create_repo { - read -p "${GREEN}repo name?${RESET} " -r - REMOTE_REPO=$REPLY - DATA='{"name":"yolo-test", "private":"true"}' - D=$(echo $DATA | sed -e "s/yolo-test/$REMOTE_REPO/") - - read -p "${GREEN}Is this for an organization? ${RESET}" -r - if [[ "$REPLY" =~ (y|Y)* ]] - then - read -p $"${GREEN} Name of organization [tidepool-org]?${RESET} " ORG - ORG=${ORG:-tidepool-org} - REMOTE_REPO=$ORG/$REMOTE_REPO - curl https://api.github.com/orgs/$ORG/repos?access_token=${GITHUB_TOKEN} -d "$D" - else - read -p $"${GREEN} User name?${RESET} " -r - REMOTE_REPO=$REPLY/$REMOTE_REPO - curl https://api.github.com/user/repos?access_token=${GITHUB_TOKEN} -d "$D" - fi - complete "private repo created" - check_remote_repo -} - -function gloo_dashboard { - mykubectl port-forward -n gloo-system deployment/api-server 8081:8080 & - open -a "Google Chrome" http://localhost:8081 -} - -function remove_gloo { - glooctl install gateway --dry-run | mykubectl delete -f - -} - -# await deletion of a CloudFormation template that represents a cluster before returning -function await_deletion { - local cluster=$(get_cluster) - start "awaiting cluster $cluster deletion" - aws cloudformation wait stack-delete-complete --stack-name eksctl-${cluster}-cluster - expect_success "Aborting wait" - complete "cluster $cluster deleted" -} - -# migrate secrets from legacy GitHub repo to AWS secrets manager -function migrate_secrets { - local cluster=$(get_cluster) - mkdir -p external-secrets - (cd external-secrets; get_secrets | external_secret upsert $cluster plaintext | separate_files | add_names) -} - -function create_secrets_managed_policy { - local file=$TMP_DIR/policy.yaml - - local cluster=$(get_cluster) - local region=$(get_region) - local stack_name=eksctl-${cluster}-external-secrets-managed-policy - aws cloudformation describe-stacks --stack-name $stack_name >/dev/null 2>&1 - if [ $? 
-ne 0 ] - then - start "Creating IAM Managed Policy for secrets management for $cluster in region $region" - local cf_file=file://$(realpath $file) - local account=$(get_aws_account) - # XXX - only supports case where cluster == env - - cat >$file <>$file <&2 - exit 1 - ;; - *) # preserve positional arguments - PARAMS="$PARAMS $1" - shift - ;; - esac -done -# set positional arguments in their proper place -eval set -- "$PARAMS" - -unset TMP_DIR -unset TEMPLATE_DIR -unset CHART_DIR -unset DEV_DIR -unset SM_DIR - -define_colors - -for param in $PARAMS -do - case $param in - all) - check_remote_repo - expect_github_token - setup_tmpdir - clone_remote - set_template_dir - set_tools_dir - expect_values_not_exist - make_values - save_changes "Added values" - make_config - save_changes "Added config packages" - create_secrets_managed_policy - make_cluster - merge_kubeconfig - make_users - save_changes "Added cluster and users" - install_gloo - save_changes "Added gloo" - make_mesh - save_changes "Added linkerd mesh" - make_flux - save_ca - make_cert - make_key - update_flux - save_changes "Added flux" - clone_secret_map - establish_ssh - migrate_secrets - establish_ssh - save_changes "Added migrated secrets" - ;; - repo) - setup_tmpdir - create_repo - ;; - values) - check_remote_repo - setup_tmpdir - clone_remote - expect_values_not_exist - set_template_dir - make_values - save_changes "Added values" - ;; - config) - check_remote_repo - setup_tmpdir - clone_remote - set_template_dir - set_tools_dir - make_config - save_changes "Added config packages" - ;; - cluster) - check_remote_repo - setup_tmpdir - clone_remote - set_tools_dir - create_secrets_managed_policy - make_cluster - merge_kubeconfig - make_users - save_changes "Added cluster and users" - ;; - gloo) - check_remote_repo - expect_github_token - setup_tmpdir - clone_remote - set_template_dir - set_tools_dir - confirm_matching_cluster - install_gloo - save_changes "Installed gloo" - ;; - flux) - check_remote_repo - expect_github_token - setup_tmpdir - clone_remote - set_template_dir - set_tools_dir - confirm_matching_cluster - make_flux - save_ca - make_cert - make_key - update_flux - save_changes "Added flux" - ;; - mesh) - check_remote_repo - setup_tmpdir - clone_remote - set_tools_dir - confirm_matching_cluster - make_mesh - save_changes "Added linkerd mesh" - ;; - edit_values) - check_remote_repo - setup_tmpdir - clone_remote - set_template_dir - set_tools_dir - edit_values - make_config - save_changes "Edited values. Updated config." 
- ;; - regenerate_cert) - check_remote_repo - setup_tmpdir - clone_remote - set_tools_dir - make_cert - ;; - copy_assets) - check_remote_repo - make_assets - ;; - randomize_secrets) - check_remote_repo - setup_tmpdir - clone_remote - set_tools_dir - local cluster=$(get_cluster) - mkdir -p external-secrets - (cd external-secrets; randomize_secrets | external_secret upsert $cluster encoded | separate_files | add_names) - save_changes "Added random secrets" - ;; - migrate_secrets) - check_remote_repo - setup_tmpdir - clone_remote - set_tools_dir - clone_secret_map - establish_ssh - migrate_secrets - save_changes "Added migrated secrets" - ;; - upsert_plaintext_secrets) - check_remote_repo - setup_tmpdir - clone_remote - set_tools_dir - local cluster=$(get_cluster) - mkdir -p external-secrets - (cd external-secrets; external_secret upsert $cluster plaintext | separate_files | add_names) - save_changes "Added plaintext secrets" - ;; - install_users) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - make_users - ;; - deploy_key) - check_remote_repo - setup_tmpdir - clone_remote - expect_github_token - make_key - ;; - delete_cluster) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - delete_cluster - ;; - await_deletion) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - await_deletion - info "cluster deleted" - ;; - remove_mesh) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - remove_mesh - save_changes "Removed mesh." - ;; - edit_repo) - check_remote_repo - setup_tmpdir - clone_remote - edit_config - save_changes "Manual changes." - ;; - merge_kubeconfig) - check_remote_repo - setup_tmpdir - clone_remote - merge_kubeconfig - ;; - remove_gloo) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - remove_gloo - ;; - gloo_dashboard) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - gloo_dashboard - ;; - linkerd_dashboard) - check_remote_repo - setup_tmpdir - clone_remote - confirm_matching_cluster - linkerd_dashboard - ;; - managed_policies) - check_remote_repo - setup_tmpdir - clone_remote - create_secrets_managed_policy - ;; - diff) - check_remote_repo - setup_tmpdir - clone_remote - diff_config - ;; - envrc) - check_remote_repo - setup_tmpdir - clone_remote - make_envrc - save_changes "Added envrc" - ;; - sumo) - check_remote_repo - setup_tmpdir - clone_remote - install_sumo - ;; - *) - panic "unknown command: $param" - ;; - esac -done -