
Commit

Merge branch 'main' into jdjaustin/issue-82-enable-rubocop-for-all-forest-iac
jdjaustin authored Aug 31, 2023
2 parents 9672b7d + 32e092d commit eec39e6
Showing 26 changed files with 1,581 additions and 23 deletions.
38 changes: 38 additions & 0 deletions .github/workflows/deploy-benchmark.yml
@@ -0,0 +1,38 @@
---
name: Benchmark Service

on:
  workflow_dispatch:
  pull_request:
    branches:
      - main
    paths:
      - 'terraform/benchmark/**'
      - 'terraform/modules/benchmark/**'
  push:
    branches:
      - main
    paths:
      - 'terraform/forest-calibnet/**'
      - 'terraform/modules/filecoin_node/**'

jobs:
  benchmark:
    name: Benchmark Service
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Checkout the code
        uses: actions/checkout@v3

      # Using Custom Composite action in ./composite-action/terraform folder
      - name: Composite Action for Deploying Terraform Resources
        uses: ./composite-action/terraform
        with:
          do_token: ${{ secrets.DO_TOKEN }}
          aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}
          slack_token: ${{ secrets.SLACK_TOKEN }}
          working_directory: terraform/benchmark
          environment: Benchmark Service
2 changes: 1 addition & 1 deletion .rubocop.yml
@@ -3,4 +3,4 @@ AllCops:
Layout/LineLength:
  Max: 120
Metrics/MethodLength:
  Max: 20
  Max: 20
5 changes: 4 additions & 1 deletion archival-snapshots-generate/src/main.rs
@@ -38,13 +38,16 @@ fn main() -> anyhow::Result<()> {
    if which::which("forest-cli").is_err() {
        bail!("forest-cli is not installed");
    }
    if which::which("forest-tool").is_err() {
        bail!("forest-tool is not installed");
    }

    env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();

    let args = Args::parse();

    info!("Analyzing the provided snapshot file");
    let epochs = std::process::Command::new("forest-cli")
    let epochs = std::process::Command::new("forest-tool")
        .args([
            "archive",
            "info",
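The new preflight check and the switched `forest-tool` invocation can be reproduced from a shell for a quick sanity check; this is only a sketch, and the snapshot path below is a placeholder:

```bash
# Mirror the binary checks from main.rs, then inspect a snapshot archive.
command -v forest-cli  >/dev/null 2>&1 || { echo "forest-cli is not installed" >&2; exit 1; }
command -v forest-tool >/dev/null 2>&1 || { echo "forest-tool is not installed" >&2; exit 1; }

# Placeholder path; substitute a real snapshot file.
forest-tool archive info /path/to/snapshot.forest.car.zst
```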
66 changes: 66 additions & 0 deletions terraform/benchmark/.terraform.lock.hcl


67 changes: 67 additions & 0 deletions terraform/benchmark/README.md
@@ -0,0 +1,67 @@
# Overview

This folder contains an executable description of a benchmarking service that
compares the performance of the latest Forest and Lotus releases. The benchmark
results are uploaded to an S3 bucket, where they are stored in both a weekly
file and an `all_result` file.
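
The results can be inspected with any S3-compatible client, for example the AWS
CLI. The bucket name and endpoint below are taken from `main.tf` in this commit;
the exact object names inside the bucket are an assumption:

```bash
# List the uploaded benchmark results in the DigitalOcean Space.
# Bucket and endpoint are defined in terraform/benchmark/main.tf.
aws s3 ls s3://forest-benchmarks/ \
  --endpoint-url https://fra1.digitaloceanspaces.com
```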

# Workflow

Changing any of the settings (such as the size of the droplet or the operating
system) will automatically re-deploy the service. The same is true for changing
any of the scripts.

To propose new changes, start by opening a PR. This triggers a new deployment
plan, which is posted in the PR comments. Once the PR is merged, the deployment
plan is executed.

The workflow has access to all the required secrets (DO token, Slack token, S3
credentials, etc.), and none of them have to be provided when creating a new PR.
However, the deployment workflow is not triggered automatically if you change
the secrets. In this case, you have to trigger the workflow manually.
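
A manual run can be started from the Actions tab or, equivalently, with the
GitHub CLI; the workflow file name is the one added in this commit:

```bash
# Dispatch the benchmark deployment workflow on the main branch.
gh workflow run deploy-benchmark.yml --ref main
```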

# Manual deployments

To manually deploy the service (useful for testing and debugging), you first
need to set the following environment variables (you will be prompted later if
you don't set these variables):

## Required environment variables

```bash
# DigitalOcean personal access token: https://cloud.digitalocean.com/account/api/tokens
export TF_VAR_do_token=
# Slack access token: https://api.slack.com/apps
export TF_VAR_slack_token=
# S3 access keys used by the benchmark service. Can be generated here: https://cloud.digitalocean.com/account/api/spaces
export TF_VAR_AWS_ACCESS_KEY_ID=
export TF_VAR_AWS_SECRET_ACCESS_KEY=
# S3 access keys used by terraform, use the same values as above
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
```

The Forest tokens can be found in 1Password.

You also need to register your public key with DigitalOcean. This can be done
here: https://cloud.digitalocean.com/account/security
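
If you prefer the command line, the same can be done with `doctl`; the key name
and file path below are placeholders:

```bash
# Register a local public key with DigitalOcean (name and path are placeholders).
doctl compute ssh-key import my-forest-key --public-key-file ~/.ssh/id_ed25519.pub
```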

To prepare terraform for other commands:
```bash
$ terraform init
```

To inspect a new deployment plan (it'll tell you which servers will be removed,
added, etc.):
```bash
$ terraform plan
```

To deploy the service:
```bash
$ terraform apply
```

To shutdown the service:
```bash
$ terraform destroy
```
47 changes: 47 additions & 0 deletions terraform/benchmark/main.tf
@@ -0,0 +1,47 @@
terraform {
  required_version = "~> 1.3"

  backend "s3" {
    # Note: This is the bucket for the internal terraform state. This bucket is
    # completely independent from the bucket that contains snapshots.
    bucket = "forest-iac"
    # This key uniquely identifies the service. To create a new service (instead
    # of modifying this one), use a new key. Unfortunately, variables may not be
    # used here.
    key = "benchmark.tfstate"

    # This value is completely unused by DO but _must_ be a known AWS region.
    region = "us-west-1"
    # The S3 region is determined by the endpoint. fra1 = Frankfurt.
    # This region does not have to be shared by the droplet.
    endpoint = "https://fra1.digitaloceanspaces.com"

    # Credentials can be validated through the Security Token Service (STS).
    # Unfortunately, DigitalOcean does not support STS, so we have to skip the
    # validation.
    skip_credentials_validation = "true"
  }
}

module "benchmark" {
# Import the benchmark module
source = "../modules/benchmark"

# Configure service:
name = "forest-benchmark" # droplet name
size = "so-2vcpu-16gb" # droplet size
slack_channel = "#forest-notifications" # slack channel for notifications
benchmark_bucket = "forest-benchmarks"
benchmark_endpoint = "fra1.digitaloceanspaces.com"

# Variable passthrough:
slack_token = var.slack_token
AWS_ACCESS_KEY_ID = var.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = var.AWS_SECRET_ACCESS_KEY
digitalocean_token = var.do_token
}

# This ip address may be used in the future by monitoring software
output "ip" {
value = [module.benchmark.ip]
}
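
Because variables may not be used inside the `backend` block, pointing a copy of this configuration at a separate state object is easiest with a partial backend configuration at init time; the key below is only an illustrative value, not one used by this repository:

```bash
# Initialise against a different state key without editing main.tf.
terraform init -backend-config="key=my-new-service.tfstate"
```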
19 changes: 19 additions & 0 deletions terraform/benchmark/variable.tf
@@ -0,0 +1,19 @@
variable "do_token" {
description = "Token for authentication."
type = string
}

variable "AWS_ACCESS_KEY_ID" {
description = "S3 access key id"
type = string
}

variable "AWS_SECRET_ACCESS_KEY" {
description = "S3 private access key"
type = string
}

variable "slack_token" {
description = "slack access token"
type = string
}
120 changes: 120 additions & 0 deletions terraform/modules/benchmark/main.tf
@@ -0,0 +1,120 @@
# This terraform script executes the following steps:
# - Archive the Ruby and shell script files into sources.tar (the hash of this
#   archive is used to determine when to re-deploy the service)
# - Boot a new droplet
# - Copy over the archive
# - Run the init.sh script in the background

terraform {
  required_version = "~> 1.3"

  required_providers {
    digitalocean = {
      source  = "digitalocean/digitalocean"
      version = "~> 2.0"
    }
    external = {
      source  = "hashicorp/external"
      version = "~> 2.1"
    }
    local = {
      source  = "hashicorp/local"
      version = "~> 2.1"
    }
  }
}

provider "digitalocean" {
  token = var.digitalocean_token
}

// Note: The init.sh file is also included in sources.tar such that the hash
// of the archive captures the entire state of the machine.
// This is a workaround, and because of this, we need to suppress the tflint
// warning for unused declarations related to the 'init.sh' file.
// tflint-ignore: terraform_unused_declarations
data "local_file" "init" {
  filename = "${path.module}/service/init.sh"
}


// Ugly hack because 'archive_file' cannot mix files and folders.
data "external" "sources_tar" {
program = ["sh", "${path.module}/prep_sources.sh", path.module]
}

data "local_file" "sources" {
filename = data.external.sources_tar.result.path
}

data "digitalocean_ssh_keys" "keys" {
sort {
key = "name"
direction = "asc"
}
}

locals {
  init_commands = ["cd /root/",
    "tar xf sources.tar",
    # Set required environment variables
    "echo 'export AWS_ACCESS_KEY_ID=\"${var.AWS_ACCESS_KEY_ID}\"' >> .forest_env",
    "echo 'export AWS_SECRET_ACCESS_KEY=\"${var.AWS_SECRET_ACCESS_KEY}\"' >> .forest_env",
    "echo 'export SLACK_API_TOKEN=\"${var.slack_token}\"' >> .forest_env",
    "echo 'export SLACK_NOTIF_CHANNEL=\"${var.slack_channel}\"' >> .forest_env",
    "echo 'export BENCHMARK_BUCKET=\"${var.benchmark_bucket}\"' >> .forest_env",
    "echo 'export BENCHMARK_ENDPOINT=\"${var.benchmark_endpoint}\"' >> .forest_env",
    "echo 'export BASE_FOLDER=\"/chainsafe\"' >> .forest_env",
    "echo '. ~/.forest_env' >> .bashrc",
    ". ~/.forest_env",
    "nohup sh ./init.sh > init_log.txt &",
    # Exiting without a sleep sometimes kills the script :-/
    "sleep 10s"
  ]
}

resource "digitalocean_droplet" "forest" {
image = var.image
name = var.name
region = var.region
size = var.size
# Re-initialize resource if this hash changes:
user_data = join("-", [data.local_file.sources.content_sha256, sha256(join("", local.init_commands))])
tags = ["iac"]
ssh_keys = data.digitalocean_ssh_keys.keys.ssh_keys[*].fingerprint

graceful_shutdown = false

connection {
host = self.ipv4_address
user = "root"
type = "ssh"
}

# Push the sources.tar file to the newly booted droplet
provisioner "file" {
source = data.local_file.sources.filename
destination = "/root/sources.tar"
}

provisioner "remote-exec" {
inline = local.init_commands
}
}

data "digitalocean_project" "forest_project" {
name = var.project
}

# Connect the droplet to the forest project (otherwise it ends up in
# "ChainBridge" which is the default project)
resource "digitalocean_project_resources" "connect_forest_project" {
project = data.digitalocean_project.forest_project.id
resources = [digitalocean_droplet.forest.urn]
}

# This ip address may be used in the future by monitoring software
output "ip" {
value = [digitalocean_droplet.forest.ipv4_address]
}
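
Since `init.sh` is started in the background with `nohup` and its output is redirected to `init_log.txt`, a freshly provisioned droplet can be checked by SSH-ing in as `root`. Replace the placeholder IP with the `ip` output above; this is a debugging sketch, not part of the deployment itself:

```bash
# Follow the bootstrap log written by the init commands above.
ssh root@<droplet-ip> 'tail -f /root/init_log.txt'

# Verify that the environment file consumed by the benchmark scripts exists.
ssh root@<droplet-ip> 'test -f /root/.forest_env && echo ".forest_env present"'
```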
11 changes: 11 additions & 0 deletions terraform/modules/benchmark/prep_sources.sh
@@ -0,0 +1,11 @@
#!/bin/bash

# Copy local source files into a folder together with ruby_common and create a tar archive.

cd "$1" || exit
cp --archive ../../../scripts/ruby_common service/ || exit

rm -f sources.tar
(cd service && tar cf ../sources.tar --sort=name --mtime='UTC 2019-01-01' ./* > /dev/null 2>&1) || exit
rm -fr service/ruby_common
echo "{ \"path\": \"$1/sources.tar\" }"