Loadtesting IAC updates (#32629)

# Github Actions (New)
- New workflow to deploy/destroy loadtest infrastructure with one-click
(Needs to be tested)
- Common inputs drive configuration and deployment of loadtest
infrastructure
    - tag
    - fleet_task_count
    - fleet_task_memory
    - fleet_task_cpu
    - fleet_database_instance_size
    - fleet_database_instance_count
    - fleet_redis_instance_size
    - fleet_redis_instance_count
    - terraform_workspace
    - terraform_action
- New workflow to deploy/destroy osquery-perf to loadtest infrastructure
with one-click (Needs to be tested)
- Common inputs drive configuration and deployment of osquery-perf
resources
    - tag
    - git_branch
    - loadtest_containers
    - extra_flags
    - terraform_workspace
    - terraform_action
- New workflow to deploy shared loadtest resources with one-click (Needs
to be tested)

# Loadtest Infrastructure (New)
- New directory (`infrastructure/loadtesting/terraform/infra`) for
one-click deployment
- Loadtest environment updated to use [fleet-terraform
modules](https://github.com/fleetdm/fleet-terraform)
- [Deployment documentation
updated](0c254bca40/infrastructure/loadtesting/terraform/infra/README.md)
to reflect new steps

# Osquery-perf deployment (New)
- New directory (`infrastructure/loadtesting/terraform/osquery-perf`)
for the deployment of osquery-perf
- osquery-perf updated to use [fleet-terraform
modules](https://github.com/fleetdm/fleet-terraform)
- [Deployment documentation
updated](0c254bca40/infrastructure/loadtesting/terraform/osquery_perf)
to reflect new steps
This commit is contained in:
Jorge Falcon 2025-10-08 15:31:37 -04:00 committed by GitHub
parent 5e506a8620
commit e952ef06c0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
33 changed files with 2137 additions and 13 deletions

229
.github/workflows/loadtest-infra.yml vendored Normal file
View file

@ -0,0 +1,229 @@
name: Deploy Loadtest - Infrastructure

on:
  workflow_dispatch:
    inputs:
      terraform_workspace:
        description: "Terraform workspace that you will be deploying to"
        type: string
        required: true
      tag:
        description: "Fleet Image Tag to deploy"
        type: string
        default: "v4.72.1"
        required: true
      fleet_task_count:
        description: "The number of ECS tasks, fleet containers, that should be deployed"
        type: string
        default: "5"
        required: true
      fleet_task_memory:
        description: "The amount of memory allocated in Megabytes for the ECS tasks"
        type: string
        default: "4096"
        required: true
      fleet_task_cpu:
        # ECS CPU is expressed in CPU units (1024 = 1 vCPU), not hertz.
        description: "The amount of CPU units allocated for the ECS tasks (1024 = 1 vCPU)"
        type: string
        default: "512"
        required: true
      fleet_database_instance_size:
        description: "AWS Instance size for the Fleet database instances"
        type: string
        # Was misspelled "defualt", so the default value was silently ignored.
        default: "db.t4g.medium"
        required: true
      fleet_database_instance_count:
        description: "Number of database instance replicas (including the writer)"
        type: string
        # Quoted: the input type is string, so the default should be a string too.
        default: "2"
        required: true
      fleet_redis_instance_size:
        description: "AWS Instance size for the Fleet redis instances"
        type: string
        # Was misspelled "defualt", so the default value was silently ignored.
        default: "cache.t4g.micro"
        required: true
      fleet_redis_instance_count:
        description: "Number of redis instance replicas (including the writer)"
        type: string
        default: "3"
        required: true
      terraform_action:
        description: Dry run only? No "terraform apply"
        type: choice
        options:
          - plan
          - apply
          - destroy
        default: plan

# This allows a subsequently queued workflow run to interrupt previous runs
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

defaults:
  run:
    # fail-fast using bash -eo pipefail. See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
    shell: bash
    working-directory: infrastructure/loadtesting/terraform/infra

env:
  AWS_REGION: us-east-2
  AWS_IAM_ROLE: arn:aws:iam::917007347864:role/github-actions-role
  TF_ACTIONS_WORKING_DIR: infrastructure/loadtesting/terraform/infra
  TF_VAR_tag: "${{ inputs.tag }}"
  TF_VAR_fleet_task_count: "${{ inputs.fleet_task_count }}"
  TF_VAR_fleet_task_memory: "${{ inputs.fleet_task_memory }}"
  TF_VAR_fleet_task_cpu: "${{ inputs.fleet_task_cpu }}"
  TF_VAR_database_instance_size: "${{ inputs.fleet_database_instance_size }}"
  TF_VAR_database_instance_count: "${{ inputs.fleet_database_instance_count }}"
  TF_VAR_redis_instance_size: "${{ inputs.fleet_redis_instance_size }}"
  TF_VAR_redis_instance_count: "${{ inputs.fleet_redis_instance_count }}"

permissions:
  id-token: write
  contents: read # This is required for actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b

jobs:
  deploy:
    name: Deploy Fleet Loadtest Environment
    runs-on: ubuntu-latest
    # NOTE(review): job-level continue-on-error hides real failures from the
    # workflow status — confirm this is intended before relying on green runs.
    continue-on-error: true
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
        with:
          egress-policy: audit
      - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
      # Guard: refuse to run against main. Branch refs are fully qualified
      # ("refs/heads/main"); the original compared against bare "main", which
      # can never match, so the guard was a no-op.
      - id: fail-on-main
        run: "false"
        if: ${{ github.ref == 'refs/heads/main' }}
      - uses: aws-actions/configure-aws-credentials@67fbcbb121271f7775d2e7715933280b06314838 # v1.7.0
        with:
          role-to-assume: ${{ env.AWS_IAM_ROLE }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Set up Go
        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
        with:
          go-version-file: 'go.mod'
      - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3
        with:
          terraform_version: 1.10.2
          terraform_wrapper: false
      - name: Terraform Init
        id: init
        run: terraform init
      # Select the requested workspace, creating it first for plan/apply runs.
      # Note: the valid context is "inputs" — the original used "input", which
      # GitHub Actions rejects as an unrecognized named value.
      - name: Terraform workspace
        id: workspace
        run: |
          if terraform workspace list | grep -q "${{ inputs.terraform_workspace }}"; then
            echo "MATCH - TF_WORKSPACE: ${{ inputs.terraform_workspace }}"
            # The action is a choice limited to plan/apply/destroy, so the
            # original per-action check here was always true and is dropped.
            terraform workspace select "${{ inputs.terraform_workspace }}"
            echo "WORKSPACE CHANGED TO ${{ inputs.terraform_workspace }}"
          else
            echo "NO MATCH - TF_WORKSPACE: ${{ inputs.terraform_workspace }}"
            if [[ "${{ inputs.terraform_action }}" = "apply" || "${{ inputs.terraform_action }}" = "plan" ]]; then
              echo "CREATING NEW TERRAFORM WORKSPACE: ${{ inputs.terraform_workspace }}"
              if terraform workspace new "${{ inputs.terraform_workspace }}"; then
                echo "TERRAFORM WORKSPACE: SUCCESSFULLY CREATED"
              else
                echo "TERRAFORM WORKSPACE: ERROR CREATING"
              fi
            fi
          fi
        continue-on-error: true
      - name: Terraform fmt
        id: fmt
        run: terraform fmt -check
        continue-on-error: true
      - name: Terraform Validate
        id: validate
        run: terraform validate -no-color
      - name: Terraform Plan
        id: plan
        run: |
          if [[ "$(terraform workspace show)" = "${{ inputs.terraform_workspace }}" ]]; then
            echo "TERRAFORM WORKSPACE: MATCHES - ${{ inputs.terraform_workspace }}"
            terraform plan -no-color
          else
            echo "TERRAFORM WORKSPACE: DOES NOT MATCH INPUT - ${{ inputs.terraform_workspace }}"
          fi
        continue-on-error: true
      - name: Terraform Apply
        if: inputs.terraform_action == 'apply'
        id: apply
        run: |
          if [[ "$(terraform workspace show)" = "${{ inputs.terraform_workspace }}" ]]; then
            echo "TERRAFORM WORKSPACE: MATCHES - ${{ inputs.terraform_workspace }}"
            terraform apply -auto-approve
          else
            echo "TERRAFORM WORKSPACE: DOES NOT MATCH INPUT - ${{ inputs.terraform_workspace }}"
          fi
      # Destroy the environment, then clean up the workspace once the state is
      # empty. The step-level "if" already guarantees the action is "destroy",
      # so the original inner action check was redundant and is dropped.
      # Status checks use "if command; then" instead of inspecting $? after the
      # fact: under bash -e a failed bare command would abort the script before
      # the original $? checks ever ran, making the error branches unreachable.
      - name: Terraform Destroy
        if: inputs.terraform_action == 'destroy'
        id: destroy
        run: |
          if [[ "$(terraform workspace show)" = "${{ inputs.terraform_workspace }}" ]]; then
            echo "TERRAFORM WORKSPACE: MATCHES - ${{ inputs.terraform_workspace }}"
            TERRAFORM_DELETE_STATUS="complete"
            if [[ $(terraform state list | wc -l) -gt 0 ]]; then
              echo "RESOURCES DETECTED IN TERRAFORM STATE FILE"
              if terraform destroy -auto-approve; then
                echo "TERRAFORM DESTROY: SUCCESSFUL"
              else
                echo "TERRAFORM DESTROY: ERROR"
                TERRAFORM_DELETE_STATUS="error"
              fi
            else
              echo "NO RESOURCES DETECTED IN TERRAFORM STATE FILE"
            fi
            if [[ $TERRAFORM_DELETE_STATUS = "complete" ]]; then
              echo "SETTING WORKSPACE TO DEFAULT"
              if terraform workspace select default; then
                echo "TERRAFORM WORKSPACE: SUCCESSFULLY SELECTED DEFAULT"
              else
                echo "TERRAFORM WORKSPACE: ERROR SELECTING DEFAULT"
              fi
              if terraform workspace delete "${{ inputs.terraform_workspace }}"; then
                echo "TERRAFORM WORKSPACE: SUCCESSFULLY DELETED"
              else
                echo "TERRAFORM WORKSPACE: ERROR DELETING"
              fi
            fi
          else
            echo "TERRAFORM WORKSPACE: DOES NOT MATCH INPUT - ${{ inputs.terraform_workspace }}"
          fi

View file

@ -0,0 +1,204 @@
name: Deploy Loadtest - Osquery Perf

on:
  workflow_dispatch:
    inputs:
      terraform_workspace:
        description: "Terraform workspace that you will be deploying to."
        type: string
        required: true
      tag:
        description: "Tag for osquery-perf deployment"
        type: string
        default: "v4.72.0"
        required: true
      git_branch:
        description: "git branch for osquery-perf deployment"
        type: string
        default: "main"
        required: true
      loadtest_containers:
        description: "Count of osquery-perf tasks to run"
        type: string
        required: true
      extra_flags:
        description: "Extra flags for osquery-perf. Example: [\"--orbit_prob\", \"0.0\"]"
        type: string
        required: false
      terraform_action:
        description: Dry run only? No "terraform apply"
        type: choice
        options:
          - plan
          - apply
          - destroy
        default: plan

# This allows a subsequently queued workflow run to interrupt previous runs
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

defaults:
  run:
    # fail-fast using bash -eo pipefail. See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
    shell: bash
    working-directory: infrastructure/loadtesting/terraform/osquery_perf

env:
  AWS_REGION: us-east-2
  AWS_IAM_ROLE: arn:aws:iam::917007347864:role/github-actions-role
  TF_ACTIONS_WORKING_DIR: infrastructure/loadtesting/terraform/osquery_perf
  TF_VAR_extra_flags: "${{ inputs.extra_flags }}"
  TF_VAR_loadtest_containers: "${{ inputs.loadtest_containers }}"
  TF_VAR_tag: "${{ inputs.tag }}"
  TF_VAR_git_branch: "${{ inputs.git_branch }}"

permissions:
  id-token: write
  contents: read # This is required for actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b

jobs:
  deploy:
    name: Deploy Fleet Loadtest Environment
    runs-on: ubuntu-latest
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
        with:
          egress-policy: audit
      - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
      # Guard: refuse to run against main. Branch refs are fully qualified
      # ("refs/heads/main"); the original compared against bare "main", which
      # can never match, so the guard was a no-op.
      - id: fail-on-main
        run: "false"
        if: ${{ github.ref == 'refs/heads/main' }}
      - uses: aws-actions/configure-aws-credentials@67fbcbb121271f7775d2e7715933280b06314838 # v1.7.0
        with:
          role-to-assume: ${{ env.AWS_IAM_ROLE }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Set up Go
        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
        with:
          go-version-file: 'go.mod'
      - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3
        with:
          terraform_version: 1.10.2
          terraform_wrapper: false
      - name: Terraform Init
        id: init
        run: terraform init
      # Select the requested workspace, creating it first for plan/apply runs.
      # Note: the valid context is "inputs" — the original used "input", which
      # GitHub Actions rejects as an unrecognized named value.
      - name: Terraform workspace
        id: workspace
        run: |
          if terraform workspace list | grep -q "${{ inputs.terraform_workspace }}"; then
            echo "MATCH - TF_WORKSPACE: ${{ inputs.terraform_workspace }}"
            # The action is a choice limited to plan/apply/destroy, so the
            # original per-action check here was always true and is dropped.
            terraform workspace select "${{ inputs.terraform_workspace }}"
            echo "WORKSPACE CHANGED TO ${{ inputs.terraform_workspace }}"
          else
            echo "NO MATCH - TF_WORKSPACE: ${{ inputs.terraform_workspace }}"
            if [[ "${{ inputs.terraform_action }}" = "apply" || "${{ inputs.terraform_action }}" = "plan" ]]; then
              echo "CREATING NEW TERRAFORM WORKSPACE: ${{ inputs.terraform_workspace }}"
              if terraform workspace new "${{ inputs.terraform_workspace }}"; then
                echo "TERRAFORM WORKSPACE: SUCCESSFULLY CREATED"
              else
                echo "TERRAFORM WORKSPACE: ERROR CREATING"
              fi
            fi
          fi
        continue-on-error: true
      - name: Terraform fmt
        id: fmt
        run: terraform fmt -check
        continue-on-error: true
      - name: Terraform Validate
        id: validate
        run: terraform validate -no-color
      - name: Terraform Plan
        id: plan
        run: |
          if [[ "$(terraform workspace show)" = "${{ inputs.terraform_workspace }}" ]]; then
            echo "TERRAFORM WORKSPACE: MATCHES - ${{ inputs.terraform_workspace }}"
            terraform plan -no-color
          else
            echo "TERRAFORM WORKSPACE: DOES NOT MATCH INPUT - ${{ inputs.terraform_workspace }}"
          fi
        continue-on-error: true
      - name: Terraform Apply
        if: inputs.terraform_action == 'apply'
        id: apply
        run: |
          if [[ "$(terraform workspace show)" = "${{ inputs.terraform_workspace }}" ]]; then
            echo "TERRAFORM WORKSPACE: MATCHES - ${{ inputs.terraform_workspace }}"
            terraform apply -auto-approve
          else
            echo "TERRAFORM WORKSPACE: DOES NOT MATCH INPUT - ${{ inputs.terraform_workspace }}"
          fi
      # Destroy the osquery-perf resources, then clean up the workspace once
      # the state is empty. The step-level "if" already guarantees the action
      # is "destroy", so the original inner action check was redundant.
      # Status checks use "if command; then" instead of inspecting $? after the
      # fact: under bash -e a failed bare command would abort the script before
      # the original $? checks ever ran, making the error branches unreachable.
      - name: Terraform Destroy
        if: inputs.terraform_action == 'destroy'
        id: destroy
        run: |
          if [[ "$(terraform workspace show)" = "${{ inputs.terraform_workspace }}" ]]; then
            echo "TERRAFORM WORKSPACE: MATCHES - ${{ inputs.terraform_workspace }}"
            TERRAFORM_DELETE_STATUS="complete"
            if [[ $(terraform state list | wc -l) -gt 0 ]]; then
              echo "RESOURCES DETECTED IN TERRAFORM STATE FILE"
              echo "TERRAFORM DESTROY: STARTED"
              if terraform destroy -auto-approve; then
                echo "TERRAFORM DESTROY: SUCCESSFUL"
              else
                echo "TERRAFORM DESTROY: ERROR"
                TERRAFORM_DELETE_STATUS="error"
              fi
            else
              echo "NO RESOURCES DETECTED IN TERRAFORM STATE FILE"
            fi
            if [[ $TERRAFORM_DELETE_STATUS = "complete" ]]; then
              echo "SETTING WORKSPACE TO DEFAULT"
              if terraform workspace select default; then
                echo "TERRAFORM WORKSPACE: SUCCESSFULLY SELECTED DEFAULT"
              else
                echo "TERRAFORM WORKSPACE: ERROR SELECTING DEFAULT"
              fi
              echo "REMOVING TERRAFORM WORKSPACE: ${{ inputs.terraform_workspace }}"
              if terraform workspace delete "${{ inputs.terraform_workspace }}"; then
                echo "TERRAFORM WORKSPACE: SUCCESSFULLY DELETED"
              else
                echo "TERRAFORM WORKSPACE: ERROR DELETING"
              fi
            fi
          else
            echo "TERRAFORM WORKSPACE: DOES NOT MATCH INPUT - ${{ inputs.terraform_workspace }}"
          fi

82
.github/workflows/loadtest-shared.yml vendored Normal file
View file

@ -0,0 +1,82 @@
name: Deploy Loadtest - Shared

on:
  workflow_dispatch:
    inputs:
      terraform_action:
        description: Dry run only? No "terraform apply"
        type: choice
        options:
          - plan
          - apply
          - destroy
        default: plan

# This allows a subsequently queued workflow run to interrupt previous runs
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

defaults:
  run:
    # fail-fast using bash -eo pipefail. See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
    shell: bash
    # Fixed: this pointed at .../infra (copy-paste from the infra workflow)
    # while TF_ACTIONS_WORKING_DIR below points at .../shared, so terraform
    # ran in the wrong directory. TODO(review): confirm the shared directory name.
    working-directory: infrastructure/loadtesting/terraform/shared

env:
  AWS_REGION: us-east-2
  AWS_IAM_ROLE: arn:aws:iam::917007347864:role/github-actions-role
  TF_ACTIONS_WORKING_DIR: infrastructure/loadtesting/terraform/shared

permissions:
  id-token: write
  contents: read # This is required for actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b

jobs:
  deploy:
    name: Deploy Fleet Loadtest Environment
    runs-on: ubuntu-latest
    # NOTE(review): job-level continue-on-error hides real failures from the
    # workflow status — confirm this is intended before relying on green runs.
    continue-on-error: true
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
        with:
          egress-policy: audit
      - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
      # Guard: refuse to run against main. Branch refs are fully qualified
      # ("refs/heads/main"); the original compared against bare "main", which
      # can never match, so the guard was a no-op.
      - id: fail-on-main
        run: "false"
        if: ${{ github.ref == 'refs/heads/main' }}
      - uses: aws-actions/configure-aws-credentials@67fbcbb121271f7775d2e7715933280b06314838 # v1.7.0
        with:
          role-to-assume: ${{ env.AWS_IAM_ROLE }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Set up Go
        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
        with:
          go-version-file: 'go.mod'
      - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3
        with:
          terraform_version: 1.10.2
          terraform_wrapper: false
      - name: Terraform Init
        id: init
        run: terraform init
      - name: Terraform fmt
        id: fmt
        run: terraform fmt -check
        continue-on-error: true
      - name: Terraform Validate
        id: validate
        run: terraform validate -no-color
      - name: Terraform Plan
        id: plan
        run: terraform plan -no-color
        continue-on-error: true
      - name: Terraform Apply
        if: inputs.terraform_action == 'apply'
        id: apply
        run: terraform apply -auto-approve
      - name: Terraform Destroy
        if: inputs.terraform_action == 'destroy'
        id: destroy
        run: terraform destroy -auto-approve

View file

@ -1,2 +1,5 @@
.external_modules
.terraform.lock.hcl
infra/.terraform
osquery_perf/.terraform

View file

@ -0,0 +1,102 @@
# Deploy Loadtesting Infrastructure
# Before we begin
Although deployments through the GitHub Action should be prioritized, for manual deployments you will need:
- Terraform v1.10.2
- Docker
- Go
Additionally, refer to the [Reference Architecture sizing recommendations](https://fleetdm.com/docs/deploy/reference-architectures#aws) for loadtest infrastructure sizing.
# Deploy with Github Actions (Coming Soon)
## Deploy/Destroy environment with Github Action
1. [Navigate to the github action](https://github.com/fleetdm/fleet/actions/workflows/loadtest-infra.yml)
2. On the top right corner, select the `Run Workflow` dropdown.
3. Fill out the details for the deployment.
4. After all details have been filled out, you will hit the green `Run Workflow` button, directly under the inputs. For `terraform_action` select `Plan`, `Apply`, or `Destroy`.
- Plan will show you the results of a dry-run
- Apply will deploy changes to the environment
- Destroy will destroy your environment
# Deploy environment manually
1. Clone the repository
2. Initialize terraform
```sh
terraform init
```
3. Create a new Terraform workspace, or select an existing one, for your environment. The workspace is used in different areas of Terraform to drive uniqueness and access to the environment.
```sh
terraform workspace new <workspace_name>
```
or, if your workspace already exists
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Ensure that your new or existing workspace is in use.
```sh
terraform workspace show
```
5. Deploy the environment (will also trigger migrations automatically)
> Note: Terraform will prompt you for confirmation to trigger the deployment. If everything looks ok, submitting `yes` will trigger the deployment.
```sh
terraform apply -var=tag=v4.72.0
```
or, you can add the additional supported terraform variables, to overwrite the default values. You can choose which ones are included/overwritten. If a variable is not defined, the default value configured in [./variables.tf](variables.tf) is used.
Below is an example with all available variables.
```sh
terraform apply -var=tag=v4.72.0 -var=fleet_task_count=20 -var=fleet_task_memory=4096 -var=fleet_task_cpu=512 -var=database_instance_size=db.t4g.large -var=database_instance_count=3 -var=redis_instance_size=cache.t4g.small -var=redis_instance_count=3
```
# Destroy environment manually
1. Clone the repository (if not already cloned)
2. Initialize terraform
```sh
terraform init
```
3. Select your workspace
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Destroy the environment
```sh
terraform destroy
```
# Delete the workspace
Once all resources have been removed from the terraform workspace, remove the terraform workspace.
```sh
terraform workspace delete <workspace_name>
```

View file

@ -0,0 +1 @@
header-from: .header.md

View file

@ -0,0 +1,198 @@
# Deploy Loadtesting Infrastructure
# Before we begin
Although deployments through the GitHub Action should be prioritized, for manual deployments you will need:
- Terraform v1.10.2
- Docker
- Go
Additionally, refer to the [Reference Architecture sizing recommendations](https://fleetdm.com/docs/deploy/reference-architectures#aws) for loadtest infrastructure sizing.
# Deploy with Github Actions (Coming Soon)
## Deploy/Destroy environment with Github Action
1. [Navigate to the github action](https://github.com/fleetdm/fleet/actions/workflows/loadtest-infra.yml)
2. On the top right corner, select the `Run Workflow` dropdown.
3. Fill out the details for the deployment.
4. After all details have been filled out, you will hit the green `Run Workflow` button, directly under the inputs. For `terraform_action` select `Plan`, `Apply`, or `Destroy`.
- Plan will show you the results of a dry-run
- Apply will deploy changes to the environment
- Destroy will destroy your environment
# Deploy environment manually
1. Clone the repository
2. Initialize terraform
```sh
terraform init
```
3. Create a new Terraform workspace, or select an existing one, for your environment. The workspace is used in different areas of Terraform to drive uniqueness and access to the environment.
```sh
terraform workspace new <workspace_name>
```
or, if your workspace already exists
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Ensure that your new or existing workspace is in use.
```sh
terraform workspace show
```
5. Deploy the environment (will also trigger migrations automatically)
> Note: Terraform will prompt you for confirmation to trigger the deployment. If everything looks ok, submitting `yes` will trigger the deployment.
```sh
terraform apply -var=tag=v4.72.0
```
or, you can add the additional supported terraform variables, to overwrite the default values. You can choose which ones are included/overwritten. If a variable is not defined, the default value configured in [./variables.tf](variables.tf) is used.
Below is an example with all available variables.
```sh
terraform apply -var=tag=v4.72.0 -var=fleet_task_count=20 -var=fleet_task_memory=4096 -var=fleet_task_cpu=512 -var=database_instance_size=db.t4g.large -var=database_instance_count=3 -var=redis_instance_size=cache.t4g.small -var=redis_instance_count=3
```
# Destroy environment manually
1. Clone the repository (if not already cloned)
2. Initialize terraform
```sh
terraform init
```
3. Select your workspace
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Destroy the environment
```sh
terraform destroy
```
# Delete the workspace
Once all resources have been removed from the terraform workspace, remove the terraform workspace.
```sh
terraform workspace delete <workspace_name>
```
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 5.68.0 |
| <a name="requirement_docker"></a> [docker](#requirement\_docker) | ~> 2.16.0 |
| <a name="requirement_git"></a> [git](#requirement\_git) | ~> 0.1.0 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | 6.14.1 |
| <a name="provider_docker"></a> [docker](#provider\_docker) | 2.16.0 |
| <a name="provider_git"></a> [git](#provider\_git) | 0.1.0 |
| <a name="provider_random"></a> [random](#provider\_random) | 3.7.2 |
| <a name="provider_terraform"></a> [terraform](#provider\_terraform) | n/a |
| <a name="provider_tls"></a> [tls](#provider\_tls) | 4.1.0 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_acm"></a> [acm](#module\_acm) | terraform-aws-modules/acm/aws | 4.3.1 |
| <a name="module_loadtest"></a> [loadtest](#module\_loadtest) | github.com/fleetdm/fleet-terraform//byo-vpc | tf-mod-root-v1.18.3 |
| <a name="module_logging_alb"></a> [logging\_alb](#module\_logging\_alb) | github.com/fleetdm/fleet-terraform//addons/logging-alb | tf-mod-addon-logging-alb-v1.6.1 |
| <a name="module_logging_firehose"></a> [logging\_firehose](#module\_logging\_firehose) | github.com/fleetdm/fleet-terraform//addons/logging-destination-firehose | tf-mod-addon-logging-destination-firehose-v1.2.4 |
| <a name="module_mdm"></a> [mdm](#module\_mdm) | github.com/fleetdm/fleet-terraform/addons/mdm?depth=1&ref=tf-mod-addon-mdm-v2.0.0 | n/a |
| <a name="module_migrations"></a> [migrations](#module\_migrations) | github.com/fleetdm/fleet-terraform//addons/migrations | tf-mod-addon-migrations-v2.1.0 |
| <a name="module_osquery-carve"></a> [osquery-carve](#module\_osquery-carve) | github.com/fleetdm/fleet-terraform//addons/osquery-carve | tf-mod-addon-osquery-carve-v1.1.1 |
| <a name="module_ses"></a> [ses](#module\_ses) | github.com/fleetdm/fleet-terraform//addons/ses | tf-mod-addon-ses-v1.4.0 |
| <a name="module_vuln-processing"></a> [vuln-processing](#module\_vuln-processing) | github.com/fleetdm/fleet-terraform//addons/external-vuln-scans | tf-mod-addon-external-vuln-scans-v2.3.0 |
## Resources
| Name | Type |
|------|------|
| [aws_ecr_repository.fleet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository) | resource |
| [aws_iam_policy.enroll](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_policy.license](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role_policy_attachment.enroll](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_kms_alias.alias](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_alias) | resource |
| [aws_kms_key.customer_data_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_kms_key.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_lb.internal](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb) | resource |
| [aws_lb_listener.internal](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener) | resource |
| [aws_lb_target_group.internal](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_target_group) | resource |
| [aws_route53_record.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource |
| [aws_secretsmanager_secret_version.scep](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) | resource |
| [aws_security_group.internal](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [docker_registry_image.fleet](https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/resources/registry_image) | resource |
| [random_password.challenge](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource |
| [random_pet.db_secret_postfix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
| [tls_private_key.cloudfront_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [tls_private_key.scep_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [tls_self_signed_cert.scep_cert](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/self_signed_cert) | resource |
| [aws_acm_certificate.certificate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_ecr_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecr_authorization_token) | data source |
| [aws_iam_policy_document.enroll](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.license](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
| [aws_route53_zone.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source |
| [aws_secretsmanager_secret.license](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source |
| [aws_secretsmanager_secret_version.enroll_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source |
| [docker_registry_image.dockerhub](https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/data-sources/registry_image) | data source |
| [git_repository.tf](https://registry.terraform.io/providers/paultyng/git/latest/docs/data-sources/repository) | data source |
| [terraform_remote_state.shared](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_database_instance_count"></a> [database\_instance\_count](#input\_database\_instance\_count) | The number of Aurora database instances | `number` | `2` | no |
| <a name="input_database_instance_size"></a> [database\_instance\_size](#input\_database\_instance\_size) | The instance size for Aurora database instances | `string` | `"db.t4g.medium"` | no |
| <a name="input_fleet_task_count"></a> [fleet\_task\_count](#input\_fleet\_task\_count) | The total number (max) that ECS can scale Fleet containers up to | `number` | `5` | no |
| <a name="input_fleet_task_cpu"></a> [fleet\_task\_cpu](#input\_fleet\_task\_cpu) | The CPU configuration for Fleet containers | `number` | `512` | no |
| <a name="input_fleet_task_memory"></a> [fleet\_task\_memory](#input\_fleet\_task\_memory) | The memory configuration for Fleet containers | `number` | `4096` | no |
| <a name="input_redis_instance_count"></a> [redis\_instance\_count](#input\_redis\_instance\_count) | The number of Elasticache nodes | `number` | `3` | no |
| <a name="input_redis_instance_size"></a> [redis\_instance\_size](#input\_redis\_instance\_size) | The instance size for Elasticache nodes | `string` | `"cache.t4g.micro"` | no |
| <a name="input_tag"></a> [tag](#input\_tag) | The tag to deploy. This would be the same as the branch name | `string` | `"v4.72.0"` | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_ecs_arn"></a> [ecs\_arn](#output\_ecs\_arn) | n/a |
| <a name="output_ecs_cluster"></a> [ecs\_cluster](#output\_ecs\_cluster) | n/a |
| <a name="output_ecs_execution_arn"></a> [ecs\_execution\_arn](#output\_ecs\_execution\_arn) | n/a |
| <a name="output_enroll_secret_arn"></a> [enroll\_secret\_arn](#output\_enroll\_secret\_arn) | n/a |
| <a name="output_internal_alb_dns_name"></a> [internal\_alb\_dns\_name](#output\_internal\_alb\_dns\_name) | n/a |
| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | n/a |
| <a name="output_logging_config"></a> [logging\_config](#output\_logging\_config) | n/a |
| <a name="output_security_groups"></a> [security\_groups](#output\_security\_groups) | n/a |
| <a name="output_server_url"></a> [server\_url](#output\_server\_url) | n/a |

View file

@ -0,0 +1,46 @@
# Auth token for the current account's ECR registry; used by the docker
# provider to push the loadtest image.
data "aws_ecr_authorization_token" "token" {}
# Upstream Fleet image on Docker Hub for the requested tag; its digest is
# folded into the ECR image tag below so pushes are content-addressed.
data "docker_registry_image" "dockerhub" {
name = "fleetdm/fleet:${var.tag}"
}
# Random one-word suffix — presumably to keep per-workspace names unique;
# confirm against how local.customer is derived.
resource "random_pet" "db_secret_postfix" {
length = 1
}
# KMS key used to encrypt the ECR repository below.
resource "aws_kms_key" "main" {
description = "${local.customer}-${random_pet.db_secret_postfix.id}"
deletion_window_in_days = 10
enable_key_rotation = true
}
# Private ECR repository holding the loadtest Fleet image. force_delete allows
# "terraform destroy" to remove the repo even when images remain in it.
resource "aws_ecr_repository" "fleet" {
name = local.customer
image_tag_mutability = "IMMUTABLE"
image_scanning_configuration {
scan_on_push = true
}
encryption_configuration {
encryption_type = "KMS"
kms_key = aws_kms_key.main.arn
}
force_delete = true
}
# Builds the image from ../docker (passing TAG as a build arg) and pushes it to
# the ECR repo. The tag embeds the Docker Hub digest (the part after "sha256:")
# so a re-released upstream tag produces a distinct, immutable ECR tag.
resource "docker_registry_image" "fleet" {
name = "${resource.aws_ecr_repository.fleet.repository_url}:${var.tag}-${split(":", data.docker_registry_image.dockerhub.sha256_digest)[1]}"
keep_remotely = true
build {
context = "../docker/"
build_args = {
TAG = var.tag
}
pull_parent = true
}
}

View file

@ -0,0 +1,39 @@
# Read access to the shared Fleet license secret (attached to the Fleet task
# via extra_execution_iam_policies in main.tf).
data "aws_iam_policy_document" "license" {
statement {
effect = "Allow"
actions = ["secretsmanager:GetSecretValue"]
resources = [
data.aws_secretsmanager_secret.license.arn
]
}
}
resource "aws_iam_policy" "license" {
name = "${local.customer}-license-iam-policy"
policy = data.aws_iam_policy_document.license.json
}
# Read access to the shared enroll secret (used by osquery-perf hosts to
# enroll against this environment).
data "aws_iam_policy_document" "enroll" {
statement {
effect = "Allow"
actions = ["secretsmanager:GetSecretValue"]
resources = [
data.aws_secretsmanager_secret_version.enroll_secret.arn
]
}
}
resource "aws_iam_policy" "enroll" {
name = "${local.customer}-enroll-policy"
description = "IAM policy that Fleet application uses to define access to AWS resources"
policy = data.aws_iam_policy_document.enroll.json
}
# Attaches the enroll policy to the ECS execution role by name. NOTE(review):
# this string must stay in sync with fleet_config.iam.execution.name in
# main.tf ("${local.customer}-execution-role"); depends_on ensures the role
# exists before the attachment is created.
resource "aws_iam_role_policy_attachment" "enroll" {
policy_arn = aws_iam_policy.enroll.arn
role = "${local.customer}-execution-role"
depends_on = [
module.loadtest
]
}

View file

@ -0,0 +1,60 @@
# Security group for the internal (private) load balancer used by
# osquery-perf agents to reach Fleet without traversing the public ALB.
# NOTE(review): ingress is fully open (all protocols, 0.0.0.0/0 and ::/0).
# The ALB itself is internal and lives in private subnets, but consider
# restricting ingress to the VPC / VPN CIDRs — confirm with the owners.
resource "aws_security_group" "internal" {
name = "${local.prefix}-internal"
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
}
# Internal ALB in the shared VPC's private subnets. idle_timeout of 905s
# exceeds Fleet's long-poll/live-query windows so connections aren't cut.
resource "aws_lb" "internal" {
name = "${local.prefix}-internal"
internal = true
security_groups = [
resource.aws_security_group.internal.id,
]
subnets = data.terraform_remote_state.shared.outputs.vpc.private_subnets
idle_timeout = 905
drop_invalid_header_fields = true
}
# Plain-HTTP listener: traffic stays inside the VPC, TLS is terminated only
# on the public ALB managed by the loadtest module.
resource "aws_lb_listener" "internal" {
load_balancer_arn = resource.aws_lb.internal.arn
port = 80
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = resource.aws_lb_target_group.internal.arn
}
}
# Target group for the Fleet ECS tasks (wired in via extra_load_balancers in
# main.tf); health-checks Fleet's /healthz endpoint.
resource "aws_lb_target_group" "internal" {
name = "${local.prefix}-internal"
protocol = "HTTP"
target_type = "ip"
port = "80"
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
deregistration_delay = 30
load_balancing_algorithm_type = "least_outstanding_requests"
health_check {
path = "/healthz"
matcher = "200"
timeout = 10
interval = 15
healthy_threshold = 5
unhealthy_threshold = 5
}
}

View file

@ -0,0 +1,12 @@
# KMS key for encrypting sensitive values kept in this workspace's state.
resource "aws_kms_key" "customer_data_key" {
description = "key used to encrypt sensitive data stored in terraform"
}
# Friendly, workspace-scoped alias for the key above.
resource "aws_kms_alias" "alias" {
name = "alias/${local.customer}-terraform-encrypted"
target_key_id = aws_kms_key.customer_data_key.id
}
# Exposed so downstream stacks (e.g. osquery-perf) can reference the key.
output "kms_key_id" {
value = aws_kms_key.customer_data_key.id
}

View file

@ -0,0 +1,113 @@
locals {
# All names are prefixed with the terraform workspace so multiple loadtest
# environments can coexist in the same account.
customer = "fleet-${terraform.workspace}"
prefix = "fleet-${terraform.workspace}"
# Fully-qualified image reference built/pushed by docker_registry_image.fleet.
fleet_image = "${aws_ecr_repository.fleet.repository_url}:${var.tag}-${split(":", data.docker_registry_image.dockerhub.sha256_digest)[1]}"
# Environment passed to the Fleet containers. Debug/trace logging and APM
# are enabled intentionally for loadtest observability.
extra_environment_variables = {
CLOUDWATCH_NAMESPACE = "fleet-loadtest-migration"
CLOUDWATCH_REGION = "us-east-2"
# PROMETHEUS_SCRAPE_URL = "http://localhost:8080/metrics"
ELASTIC_APM_SERVER_URL = "https://loadtest.fleetdm.com:8200"
ELASTIC_APM_SERVICE_NAME = "fleet"
ELASTIC_APM_ENVIRONMENT = "${terraform.workspace}"
ELASTIC_APM_TRANSACTION_SAMPLE_RATE = "0.004"
ELASTIC_APM_SERVICE_VERSION = "${var.tag}-${split(":", data.docker_registry_image.dockerhub.sha256_digest)[1]}"
FLEET_VULNERABILITIES_DATABASES_PATH = "/home/fleet"
FLEET_OSQUERY_ENABLE_ASYNC_HOST_PROCESSING = "false"
FLEET_LOGGING_JSON = "true"
FLEET_LOGGING_DEBUG = "true"
FLEET_LOGGING_TRACING_ENABLED = "true"
FLEET_LOGGING_TRACING_TYPE = "elasticapm"
FLEET_MYSQL_MAX_OPEN_CONNS = "10"
FLEET_MYSQL_READ_REPLICA_MAX_OPEN_CONNS = "10"
FLEET_OSQUERY_ASYNC_HOST_REDIS_SCAN_KEYS_COUNT = "10000"
FLEET_REDIS_MAX_OPEN_CONNS = "500"
FLEET_REDIS_MAX_IDLE_CONNS = "500"
# Load TLS Certificate for RDS Authentication
FLEET_MYSQL_TLS_CA = local.cert_path
FLEET_MYSQL_READ_REPLICA_TLS_CA = local.cert_path
}
# Secrets injected into the task definition by ARN (resolved at runtime).
extra_secrets = {
FLEET_LICENSE_KEY = data.aws_secretsmanager_secret.license.arn
}
# Private Subnets from VPN VPC
vpn_cidr_blocks = [
"10.255.1.0/24",
"10.255.2.0/24",
"10.255.3.0/24",
]
/*
configurations below are necessary for MySQL TLS authentication
MySQL TLS Settings to download and store TLS Certificate
ca_thumbprint is maintained in the infrastructure/cloud/shared/
ca_thumbprint is the sha1 thumbprint value of the following certificate: aws rds describe-db-instances --filters='Name=db-cluster-id,Values='${cluster_name}'' | jq '.DBInstances.[0].CACertificateIdentifier' | sed 's/\"//g'
You can retrieve the value with the following command: aws rds describe-certificates --certificate-identifier=${ca_cert_val} | jq '.Certificates.[].Thumbprint' | sed 's/\"//g'
*/
ca_cert_thumbprint = "8cf85e3e2bdbcbe2c4a34c1e85828fb29833e87f"
# Mount path of the shared "rds-tls-certs" volume inside the containers.
rds_container_path = "/tmp/rds-tls"
cert_path = "${local.rds_container_path}/${data.aws_region.current.region}.pem"
# load the certificate with a side car into a volume mount
sidecars = [
# Non-essential init-style container: fetches the RDS CA bundle, picks the
# cert matching ca_cert_thumbprint, and drops it in the shared volume.
# The Fleet container's dependsOn (main.tf) waits for its SUCCESS.
{
name = "rds-tls-ca-retriever"
image = "public.ecr.aws/docker/library/alpine@sha256:8a1f59ffb675680d47db6337b49d22281a139e9d709335b492be023728e11715"
entrypoint = ["/bin/sh", "-c"]
command = [templatefile("./template/mysql_ca_tls_retrieval.sh.tpl", {
aws_region = data.aws_region.current.region
container_path = local.rds_container_path
ca_cert_thumbprint = local.ca_cert_thumbprint
})]
logConfiguration = {
logDriver = "awslogs"
options = {
"awslogs-group" = local.customer
"awslogs-region" = data.aws_region.current.region
"awslogs-stream-prefix" = "rds-tls-ca-retriever"
}
}
environment = []
mountPoints = [
{
sourceVolume = "rds-tls-certs",
containerPath = local.rds_container_path
}
]
essential = false
},
# {
# name = "prometheus-exporter"
# image = "${data.terraform_remote_state.shared.outputs.ecr.repository_url}:latest"
# entrypoint = []
# command = ["sleep"]
# logConfiguration = {
# logDriver = "awslogs"
# options = {
# "awslogs-group" = local.customer
# "awslogs-region" = data.aws_region.current.region
# "awslogs-stream-prefix" = "fleet-prometheus-exporter"
# }
# }
# environment = [
# {
# name = "CLOUDWATCH_NAMESPACE"
# value = "fleet-loadtest"
# },
# {
# name = "CLOUDWATCH_REGION"
# value = "us-east-2"
# },
# {
# name = "PROMETHEUS_SCRAPE_URL"
# value = "http://localhost:8080/metrics"
# },
# ]
# mountPoints = []
# essential = false
# }
]
}

View file

@ -0,0 +1,265 @@
data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
# Repo metadata (points at the repository root relative to this module).
data "git_repository" "tf" {
path = "${path.module}/../../../../"
}
# Pre-issued wildcard cert for *.loadtest.fleetdm.com used by the public ALB.
data "aws_acm_certificate" "certificate" {
domain = "*.${data.aws_route53_zone.main.name}"
statuses = ["ISSUED"]
types = ["AMAZON_ISSUED"]
most_recent = true
}
data "aws_route53_zone" "main" {
name = "loadtest.fleetdm.com."
private_zone = false
}
# Public DNS record for this workspace's Fleet server, aliased to the ALB
# created by the loadtest module.
resource "aws_route53_record" "main" {
zone_id = data.aws_route53_zone.main.id
name = "${local.customer}.loadtest.fleetdm.com"
type = "A"
alias {
name = module.loadtest.byo-db.alb.lb_dns_name
zone_id = module.loadtest.byo-db.alb.lb_zone_id
evaluate_target_health = true
}
}
# Core Fleet deployment (Aurora MySQL + ElastiCache Redis + ECS + public ALB)
# from the fleet-terraform byo-vpc root module, attached to the shared VPC.
module "loadtest" {
source = "github.com/fleetdm/fleet-terraform//byo-vpc?ref=tf-mod-root-v1.18.3"
vpc_config = {
name = local.customer
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
networking = {
subnets = data.terraform_remote_state.shared.outputs.vpc.private_subnets
}
}
# Aurora MySQL cluster restored from a pre-seeded snapshot so loadtests
# start from a populated database.
rds_config = {
name = local.customer
instance_class = var.database_instance_size
replicas = var.database_instance_count
engine_version = "8.0.mysql_aurora.3.08.2"
snapshot_identifier = "arn:aws:rds:us-east-2:917007347864:cluster-snapshot:cleaned-8-0-teams-fixes-v4-55-0-minimum"
preferred_maintenance_window = "fri:04:00-fri:05:00"
# VPN
subnets = data.terraform_remote_state.shared.outputs.vpc.database_subnets
allowed_cidr_blocks = concat(data.terraform_remote_state.shared.outputs.vpc.private_subnets_cidr_blocks, local.vpn_cidr_blocks)
db_parameters = {
# 8mb up from 262144 (256k) default
sort_buffer_size = 8388608
}
}
# Redis cluster; pubsub buffer limits are lifted (0 = unlimited) for Fleet
# live-query traffic under load.
redis_config = {
name = local.customer
instance_type = var.redis_instance_size
cluster_size = var.redis_instance_count
subnets = data.terraform_remote_state.shared.outputs.vpc.private_subnets
elasticache_subnet_group_name = data.terraform_remote_state.shared.outputs.vpc.elasticache_subnet_group_name
allowed_cidrs = concat(data.terraform_remote_state.shared.outputs.vpc.private_subnets_cidr_blocks, local.vpn_cidr_blocks)
availability_zones = ["us-east-2a", "us-east-2b", "us-east-2c"]
parameter = [
{ name = "client-output-buffer-limit-pubsub-hard-limit", value = 0 },
{ name = "client-output-buffer-limit-pubsub-soft-limit", value = 0 },
{ name = "client-output-buffer-limit-pubsub-soft-seconds", value = 0 },
]
}
ecs_cluster = {
cluster_name = local.customer
}
fleet_config = {
image = local.fleet_image
family = local.customer
mem = var.fleet_task_memory
cpu = var.fleet_task_cpu
security_group_name = local.customer
networking = {
ingress_sources = {
security_groups = [
resource.aws_security_group.internal.id,
]
}
}
# Also register the tasks with the internal ALB so osquery-perf can reach
# Fleet over private networking (see networking.tf).
extra_load_balancers = [{
target_group_arn = resource.aws_lb_target_group.internal.arn
container_name = "fleet"
container_port = 8080
}]
# min == max pins the task count for reproducible load results.
autoscaling = {
min_capacity = var.fleet_task_count
max_capacity = var.fleet_task_count
}
awslogs = {
name = local.customer
retention = 365
}
# NOTE(review): the execution role name is also referenced by string in
# iam.tf (aws_iam_role_policy_attachment.enroll); keep them in sync.
iam = {
role = {
name = "${local.customer}-role"
policy_name = "${local.customer}-iam-policy"
}
execution = {
name = "${local.customer}-execution-role"
policy_name = "${local.customer}-iam-policy-execution"
}
}
extra_iam_policies = concat(
module.osquery-carve.fleet_extra_iam_policies,
module.ses.fleet_extra_iam_policies,
module.logging_firehose.fleet_extra_iam_policies,
)
# Add these for MDM or cloudfront
extra_execution_iam_policies = concat(
module.mdm.extra_execution_iam_policies,
# module.cloudfront-software-installers.extra_execution_iam_policies,
[
resource.aws_iam_policy.license.arn
],
)
extra_environment_variables = merge(
module.osquery-carve.fleet_extra_environment_variables,
module.vuln-processing.extra_environment_variables,
module.ses.fleet_extra_environment_variables,
module.logging_firehose.fleet_extra_environment_variables,
local.extra_environment_variables,
)
extra_secrets = merge(
module.mdm.extra_secrets,
# module.cloudfront-software-installers.extra_secrets,
local.extra_secrets
)
private_key_secret_name = "${local.customer}-fleet-server-private-key"
# Shared volume the rds-tls-ca-retriever sidecar writes the RDS CA into.
volumes = [
{
name = "rds-tls-certs"
}
]
mount_points = [
{
sourceVolume = "rds-tls-certs",
containerPath = local.rds_container_path
}
]
# Container-level dependsOn (not Terraform depends_on): the Fleet container
# waits until the CA-retriever sidecar exits successfully.
depends_on = [
{
containerName = "rds-tls-ca-retriever"
condition = "SUCCESS"
},
# {
# containerName = "prometheus-exporter"
# condition = "START"
# }
]
sidecars = local.sidecars
}
# Public ALB with TLS (wildcard cert) and S3 access logging; 905s idle
# timeout matches the internal ALB in networking.tf.
alb_config = {
name = local.customer
enable_deletion_protection = false
certificate_arn = data.aws_acm_certificate.certificate.arn
subnets = data.terraform_remote_state.shared.outputs.vpc.public_subnets
access_logs = {
bucket = module.logging_alb.log_s3_bucket_id
prefix = local.customer
enabled = true
}
idle_timeout = 905
}
}
# ACM lookup helper only (create_certificate = false): no cert is issued here;
# the public ALB uses the pre-issued wildcard cert from data.aws_acm_certificate.
module "acm" {
source = "terraform-aws-modules/acm/aws"
version = "4.3.1"
domain_name = "${local.customer}.loadtest.fleetdm.com"
zone_id = data.aws_route53_zone.main.id
create_certificate = false
wait_for_validation = false
}
# SES identity + DNS records so Fleet can send email from this environment.
module "ses" {
source = "github.com/fleetdm/fleet-terraform//addons/ses?ref=tf-mod-addon-ses-v1.4.0"
zone_id = data.aws_route53_zone.main.id
domain = "${terraform.workspace}.loadtest.fleetdm.com"
extra_txt_records = []
custom_mail_from = {
enabled = true
domain_prefix = "mail"
}
}
# Runs Fleet DB migrations as a one-off ECS task after the service exists.
module "migrations" {
source = "github.com/fleetdm/fleet-terraform//addons/migrations?ref=tf-mod-addon-migrations-v2.1.0"
ecs_cluster = module.loadtest.byo-db.byo-ecs.service.cluster
task_definition = module.loadtest.byo-db.byo-ecs.task_definition.family
task_definition_revision = module.loadtest.byo-db.byo-ecs.task_definition.revision
subnets = module.loadtest.byo-db.byo-ecs.service.network_configuration[0].subnets
security_groups = module.loadtest.byo-db.byo-ecs.service.network_configuration[0].security_groups
ecs_service = module.loadtest.byo-db.byo-ecs.service.name
desired_count = module.loadtest.byo-db.byo-ecs.appautoscaling_target.min_capacity
min_capacity = module.loadtest.byo-db.byo-ecs.appautoscaling_target.min_capacity
depends_on = [
module.loadtest,
module.vuln-processing
]
}
# Dedicated ECS task for vulnerability scanning, offloaded from the main
# Fleet service.
module "vuln-processing" {
source = "github.com/fleetdm/fleet-terraform//addons/external-vuln-scans?ref=tf-mod-addon-external-vuln-scans-v2.3.0"
ecs_cluster = module.loadtest.byo-db.byo-ecs.service.cluster
execution_iam_role_arn = module.loadtest.byo-db.byo-ecs.execution_iam_role_arn
subnets = module.loadtest.byo-db.byo-ecs.service.network_configuration[0].subnets
security_groups = module.loadtest.byo-db.byo-ecs.service.network_configuration[0].security_groups
fleet_config = module.loadtest.byo-db.byo-ecs.fleet_config
task_role_arn = module.loadtest.byo-db.byo-ecs.iam_role_arn
fleet_server_private_key_secret_arn = module.loadtest.byo-db.byo-ecs.fleet_server_private_key_secret_arn
awslogs_config = {
group = module.loadtest.byo-db.byo-ecs.fleet_config.awslogs.name
region = module.loadtest.byo-db.byo-ecs.fleet_config.awslogs.region
prefix = module.loadtest.byo-db.byo-ecs.fleet_config.awslogs.prefix
}
fleet_s3_software_installers_config = module.loadtest.byo-db.byo-ecs.fleet_s3_software_installers_config
}
# MDM secrets wiring: Windows MDM only; Apple MDM/APN/ABM are disabled, so
# only the SCEP secret (populated in scep.tf) is created.
module "mdm" {
source = "github.com/fleetdm/fleet-terraform/addons/mdm?depth=1&ref=tf-mod-addon-mdm-v2.0.0"
apn_secret_name = null
scep_secret_name = "${local.customer}-scep"
abm_secret_name = null
enable_windows_mdm = true
enable_apple_mdm = false
}
# S3 bucket for osquery file carving.
module "osquery-carve" {
source = "github.com/fleetdm/fleet-terraform//addons/osquery-carve?ref=tf-mod-addon-osquery-carve-v1.1.1"
osquery_carve_s3_bucket = {
name = "${local.customer}-osquery-carve"
}
}
# ALB access-log bucket (+ Athena table for querying the logs).
module "logging_alb" {
source = "github.com/fleetdm/fleet-terraform//addons/logging-alb?ref=tf-mod-addon-logging-alb-v1.6.1"
prefix = local.customer
alt_path_prefix = local.customer
enable_athena = true
}
# Firehose delivery of osquery results/status and audit logs to S3; 1-day
# expiry since loadtest output is throwaway.
module "logging_firehose" {
source = "github.com/fleetdm/fleet-terraform//addons/logging-destination-firehose?ref=tf-mod-addon-logging-destination-firehose-v1.2.4"
prefix = local.customer
osquery_results_s3_bucket = {
name = "${local.customer}-osquery-results-firehose-policy"
expires_days = 1
}
osquery_status_s3_bucket = {
name = "${local.customer}-osquery-status-firehose-policy"
expires_days = 1
}
audit_s3_bucket = {
name = "${local.customer}-audit-firehose-policy"
expires_days = 1
}
}

View file

@ -0,0 +1,15 @@
# Random SCEP enrollment challenge for this workspace.
resource "random_password" "challenge" {
length = 12
special = false
}
# Populates the SCEP secret created by module.mdm with the self-signed CA
# cert/key from tls.tf plus the challenge above, in the env-var shape Fleet
# expects.
resource "aws_secretsmanager_secret_version" "scep" {
secret_id = module.mdm.scep.id
secret_string = jsonencode(
{
FLEET_MDM_APPLE_SCEP_CERT_BYTES = tls_self_signed_cert.scep_cert.cert_pem
FLEET_MDM_APPLE_SCEP_KEY_BYTES = tls_private_key.scep_key.private_key_pem
FLEET_MDM_APPLE_SCEP_CHALLENGE = random_password.challenge.result
}
)
}

View file

@ -0,0 +1,37 @@
# Outputs consumed by the osquery-perf stack via terraform_remote_state
# (see ../osquery_perf); most are marked sensitive to keep ARNs/role names
# out of plan output.
output "server_url" {
value = "https://${aws_route53_record.main.fqdn}"
}
output "internal_alb_dns_name" {
value = resource.aws_lb.internal.dns_name
}
output "ecs_cluster" {
sensitive = true
value = module.loadtest.byo-db.byo-ecs.service.cluster
}
output "security_groups" {
sensitive = true
value = module.loadtest.byo-db.byo-ecs.service.network_configuration[0].security_groups
}
output "ecs_arn" {
sensitive = true
value = module.loadtest.byo-db.byo-ecs.iam_role_arn
}
output "ecs_execution_arn" {
sensitive = true
value = module.loadtest.byo-db.byo-ecs.execution_iam_role_arn
}
output "logging_config" {
sensitive = true
value = module.loadtest.byo-db.byo-ecs.logging_config
}
output "enroll_secret_arn" {
sensitive = true
value = data.aws_secretsmanager_secret_version.enroll_secret.arn
}

View file

@ -0,0 +1,67 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 5.68.0"
}
docker = {
source = "kreuzwerker/docker"
version = "~> 2.16.0"
}
git = {
source = "paultyng/git"
version = "~> 0.1.0"
}
}
# Workspace-aware S3 backend; each loadtest environment lives under its own
# workspace prefix in the shared state bucket.
backend "s3" {
bucket = "fleet-terraform-state20220408141538466600000002"
key = "loadtesting/loadtesting/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
assume_role = {
role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
}
}
}
# Default tags applied to every AWS resource for attribution/cleanup.
provider "aws" {
region = "us-east-2"
default_tags {
tags = {
environment = "loadtest"
terraform = "https://github.com/fleetdm/fleet/tree/main/infrastructure/loadtesting"
state = "s3://fleet-terraform-state20220408141538466600000002/loadtesting/${terraform.workspace}/loadtesting/loadtesting/terraform.tfstate"
workspace = "${terraform.workspace}"
}
}
}
# Shared loadtesting resources (VPC, enroll secret, ...) deployed separately.
data "terraform_remote_state" "shared" {
backend = "s3"
config = {
bucket = "fleet-terraform-state20220408141538466600000002"
key = "loadtesting/loadtesting/shared/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
assume_role = {
role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
}
}
}
# Docker provider authenticated against this account's ECR registry using the
# token from data.aws_ecr_authorization_token.token.
provider "docker" {
# Configuration options
registry_auth {
address = "${data.aws_caller_identity.current.account_id}.dkr.ecr.us-east-2.amazonaws.com"
username = data.aws_ecr_authorization_token.token.user_name
password = data.aws_ecr_authorization_token.token.password
}
}
provider "git" {}

View file

@ -0,0 +1,7 @@
# Shared Fleet license key secret (referenced in locals.extra_secrets).
data "aws_secretsmanager_secret" "license" {
name = "/fleet/license"
}
# Enroll secret created by the shared stack; its ARN is exported for the
# osquery-perf deployment.
data "aws_secretsmanager_secret_version" "enroll_secret" {
secret_id = data.terraform_remote_state.shared.outputs.enroll_secret.id
}

View file

@ -0,0 +1,10 @@
# CloudFront distribution with signed URLs for software installers, keyed by
# the CloudFront keypair in tls.tf. NOTE(review): its IAM policies/secrets
# hookups in main.tf are currently commented out — confirm whether this
# module is meant to be active.
module "cloudfront-software-installers" {
source = "github.com/fleetdm/fleet-terraform/addons/cloudfront-software-installers?ref=tf-mod-addon-cloudfront-software-installers-v1.0.1"
customer = terraform.workspace
s3_bucket = module.loadtest.byo-db.byo-ecs.fleet_s3_software_installers_config.bucket_name
s3_kms_key_id = module.loadtest.byo-db.byo-ecs.fleet_s3_software_installers_config.kms_key_id
public_key = tls_private_key.cloudfront_key.public_key_pem
private_key = tls_private_key.cloudfront_key.private_key_pem
enable_logging = true
logging_s3_bucket = module.logging_alb.log_s3_bucket_id
}

View file

@ -0,0 +1,14 @@
#!/bin/bash
# Rendered by Terraform's templatefile(): ${...} is interpolated by Terraform
# (aws_region, ca_cert_thumbprint, container_path); $${...} escapes to a
# literal shell ${...}.
# Downloads the regional RDS CA bundle, splits it into individual certs, and
# installs the cert whose SHA1 fingerprint matches ca_cert_thumbprint at the
# path where Fleet expects FLEET_MYSQL_TLS_CA.
apk add coreutils openssl
wget --quiet https://truststore.pki.rds.amazonaws.com/${aws_region}/${aws_region}-bundle.pem -O ${aws_region}-bundle.dl.pem
csplit -z -k -f cert. -b '%02d.pem' ${aws_region}-bundle.dl.pem '/-----BEGIN CERTIFICATE-----/' '{*}'
for filename in cert.*;
do
# openssl prints "SHA1 Fingerprint=AA:BB:..."; strip the label and colons,
# lowercase to match the thumbprint format used in locals.tf.
thumbprint=$(openssl x509 -in $${filename} -noout -fingerprint | cut -c 18- | sed 's/\://g' | awk '{print tolower($0)}')
if [[ "${ca_cert_thumbprint}" = "$${thumbprint}" ]];
then
mv $${filename} ${container_path}/${aws_region}.pem
fi
done

View file

@ -0,0 +1,31 @@
# MDM
# Self-signed SCEP CA used by the MDM module (stored in the SCEP secret via
# scep.tf). ~10 years validity (87648 hours).
resource "tls_private_key" "scep_key" {
algorithm = "RSA"
rsa_bits = 4096
}
resource "tls_self_signed_cert" "scep_cert" {
private_key_pem = tls_private_key.scep_key.private_key_pem
subject {
common_name = "Fleet Root CA"
organization = "Fleet."
country = "US"
}
is_ca_certificate = true
validity_period_hours = 87648
allowed_uses = [
"cert_signing",
"crl_signing",
"key_encipherment",
"digital_signature",
]
}
# Cloudfront
# Keypair for CloudFront signed URLs (consumed by
# module.cloudfront-software-installers).
resource "tls_private_key" "cloudfront_key" {
algorithm = "RSA"
rsa_bits = 2048
}

View file

@ -0,0 +1,61 @@
# Inputs for the loadtest infrastructure deployment; driven by the GitHub
# Actions workflow or -var flags (see README).
variable "tag" {
description = "The tag to deploy. This would be the same as the branch name"
# Explicit type for consistency with the other variables (was previously
# inferred from the default).
type        = string
default     = "v4.72.0"
}
# Used for both min and max autoscaling capacity, so this pins the exact
# number of Fleet tasks (see fleet_config.autoscaling in main.tf).
variable "fleet_task_count" {
description = "The total number (max) that ECS can scale Fleet containers up to"
type        = number
default     = 5
validation {
condition     = var.fleet_task_count >= 0
error_message = "var.fleet_task_count must be greater than or equal to 0."
}
}
variable "fleet_task_memory" {
description = "The memory configuration for Fleet containers"
type        = number
default     = 4096
}
variable "fleet_task_cpu" {
description = "The CPU configuration for Fleet containers"
type        = number
default     = 512
}
variable "database_instance_size" {
description = "The instance size for Aurora database instances"
type        = string
default     = "db.t4g.medium"
}
# Total Aurora instances in the cluster (writer + replicas).
variable "database_instance_count" {
description = "The number of Aurora database instances"
type        = number
default     = 2
validation {
condition     = var.database_instance_count >= 1
error_message = "var.database_instance_count must be greater than or equal to 1."
}
}
variable "redis_instance_size" {
description = "The instance size for Elasticache nodes"
type        = string
default     = "cache.t4g.micro"
}
variable "redis_instance_count" {
description = "The number of Elasticache nodes"
type        = number
default     = 3
validation {
condition     = var.redis_instance_count >= 3
error_message = "var.redis_instance_count must be greater than or equal to 3."
}
}

View file

@ -0,0 +1,103 @@
# Deploy osquery perf to a Loadtest environment
# Before we begin
Although deployments through the GitHub Action should be preferred, for manual deployments you will need:
- [A loadtest environment](../infra/README.md)
- Terraform v1.10.2
- Docker
- Go
# Deploy with Github Actions (Coming Soon)
1. [Navigate to the github action](https://github.com/fleetdm/fleet/actions/workflows/loadtest-osquery-perf.yml)
2. On the top right corner, select the `Run Workflow` dropdown.
3. Fill out the details for the deployment.
4. After all details have been filled out, you will hit the green `Run Workflow` button, directly under the inputs. For `terraform_action` select `Plan`, `Apply`, or `Destroy`.
- Plan will show you the results of a dry-run
- Apply will deploy changes to the environment
- Destroy will destroy your environment
# Deploy osquery perf manually
1. Clone the repository
2. Initialize terraform
```sh
terraform init
```
3. Create a new terraform workspace or select an existing workspace for your environment. The terraform workspace is used in different areas of Terraform to drive uniqueness and access to the environment.
> Note: The workspace from the infrastructure deployment will not be carried over to this deployment. A new or existing workspace, specifically for osquery perf must be used.
>
> Your workspace name must match the workspace name that was used for the infrastructure deployment. Failure to use a matching workspace name can lead to deployments in another environment.
```sh
terraform workspace new <workspace_name>
```
or, if your workspace already exists
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Ensure that your new or existing workspace is in use.
```sh
terraform workspace show
```
5. Deploy the environment (will also trigger migrations automatically)
> Note: Terraform will prompt you for confirmation to trigger the deployment. If everything looks ok, submitting `yes` will trigger the deployment.
```sh
terraform apply -var=tag=v4.73.0 -var=git_branch=fleet-v4.73.0
```
or, you can add the additional supported terraform variables, to overwrite the default values. You can choose which ones are included/overwritten. If a variable is not defined, the default value configured in [./variables.tf](variables.tf) is used.
Below is an example with all available variables.
```sh
terraform apply -var=tag=v4.73.0 -var=git_branch=fleet-v4.73.0 -var=loadtest_containers=20 -var='extra_flags=["--orbit_prob", "0.0"]'
```
# Destroy osquery perf manually
1. Clone the repository (if not already cloned)
2. Initialize terraform
```sh
terraform init
```
3. Select your workspace
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Destroy the environment
```sh
terraform destroy
```
# Delete the workspace
Once all resources have been removed from the terraform workspace, remove the terraform workspace.
```sh
terraform workspace delete <workspace_name>
```

View file

@ -0,0 +1 @@
header-from: .header.md

View file

@ -0,0 +1,155 @@
# Deploy osquery perf to a Loadtest environment
# Before we begin
Although deployments through the GitHub Action should be preferred, for manual deployments you will need:
- [A loadtest environment](../infra/README.md)
- Terraform v1.10.2
- Docker
- Go
# Deploy with Github Actions (Coming Soon)
1. [Navigate to the github action](https://github.com/fleetdm/fleet/actions/workflows/loadtest-osquery-perf.yml)
2. On the top right corner, select the `Run Workflow` dropdown.
3. Fill out the details for the deployment.
4. After all details have been filled out, you will hit the green `Run Workflow` button, directly under the inputs. For `terraform_action` select `Plan`, `Apply`, or `Destroy`.
- Plan will show you the results of a dry-run
- Apply will deploy changes to the environment
- Destroy will destroy your environment
# Deploy osquery perf manually
1. Clone the repository
2. Initialize terraform
```sh
terraform init
```
3. Create a new terraform workspace or select an existing workspace for your environment. The terraform workspace is used in different areas of Terraform to drive uniqueness and access to the environment.
> Note: The workspace from the infrastructure deployment will not be carried over to this deployment. A new or existing workspace, specifically for osquery perf must be used.
>
> Your workspace name must match the workspace name that was used for the infrastructure deployment. Failure to use a matching workspace name can lead to deployments in another environment.
```sh
terraform workspace new <workspace_name>
```
or, if your workspace already exists
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Ensure that your new or existing workspace is in use.
```sh
terraform workspace show
```
5. Deploy the environment (will also trigger migrations automatically)
> Note: Terraform will prompt you for confirmation to trigger the deployment. If everything looks ok, submitting `yes` will trigger the deployment.
```sh
terraform apply -var=tag=v4.73.0 -var=git_branch=fleet-v4.73.0
```
or, you can add the additional supported terraform variables, to overwrite the default values. You can choose which ones are included/overwritten. If a variable is not defined, the default value configured in [./variables.tf](variables.tf) is used.
Below is an example with all available variables.
```sh
terraform apply -var=tag=v4.73.0 -var=git_branch=fleet-v4.73.0 -var=loadtest_containers=20 -var='extra_flags=["--orbit_prob", "0.0"]'
```
# Destroy osquery perf manually
1. Clone the repository (if not already cloned)
2. Initialize terraform
```sh
terraform init
```
3. Select your workspace
```sh
terraform workspace list
terraform workspace select <workspace_name>
```
4. Destroy the environment
```sh
terraform destroy
```
# Delete the workspace
Once all resources have been removed from the terraform workspace, remove the terraform workspace.
```sh
terraform workspace delete <workspace_name>
```
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 5.68.0 |
| <a name="requirement_docker"></a> [docker](#requirement\_docker) | ~> 2.16.0 |
| <a name="requirement_git"></a> [git](#requirement\_git) | ~> 0.1.0 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | 6.13.0 |
| <a name="provider_docker"></a> [docker](#provider\_docker) | 2.16.0 |
| <a name="provider_git"></a> [git](#provider\_git) | 0.1.0 |
| <a name="provider_terraform"></a> [terraform](#provider\_terraform) | n/a |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_osquery_perf"></a> [osquery\_perf](#module\_osquery\_perf) | github.com/fleetdm/fleet-terraform//addons/osquery-perf | tf-mod-addon-osquery-perf-v1.2.0 |
## Resources
| Name | Type |
|------|------|
| [docker_registry_image.loadtest](https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/resources/registry_image) | resource |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_ecr_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecr_authorization_token) | data source |
| [aws_ecr_repository.fleet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecr_repository) | data source |
| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
| [docker_registry_image.dockerhub](https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/data-sources/registry_image) | data source |
| [git_repository.tf](https://registry.terraform.io/providers/paultyng/git/latest/docs/data-sources/repository) | data source |
| [terraform_remote_state.infra](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source |
| [terraform_remote_state.shared](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_extra_flags"></a> [extra\_flags](#input\_extra\_flags) | Comma delimited list (string) for passing extra flags to osquery-perf containers | `list(string)` | <pre>[<br/> "--orbit_prob",<br/> "0.0"<br/>]</pre> | no |
| <a name="input_git_branch"></a> [git\_branch](#input\_git\_branch) | The git branch to use to build loadtest containers. Only needed if docker tag doesn't match the git branch | `string` | `null` | no |
| <a name="input_loadtest_containers"></a> [loadtest\_containers](#input\_loadtest\_containers) | Number of loadtest containers to deploy | `number` | `1` | no |
| <a name="input_tag"></a> [tag](#input\_tag) | The tag to deploy. This would be the same as the branch name | `string` | `""` | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_osquery_perf"></a> [osquery\_perf](#output\_osquery\_perf) | n/a |

View file

@ -0,0 +1,24 @@
# ECR auth token for the docker provider's registry_auth block.
data "aws_ecr_authorization_token" "token" {}
# Digest of the upstream Fleet image for var.tag; appended to the loadtest
# image tag so it tracks the exact upstream build.
data "docker_registry_image" "dockerhub" {
name = "fleetdm/fleet:${var.tag}"
}
# ECR repository created by the infra deployment for this same workspace
# (name must match: "fleet-<workspace>").
data "aws_ecr_repository" "fleet" {
name = local.customer
}
# Builds the osquery-perf loadtest image (loadtest.Dockerfile, built from
# local.loadtest_tag — git branch if given, else the docker tag) and pushes
# it to the shared ECR repo under a "loadtest-" prefixed tag.
resource "docker_registry_image" "loadtest" {
name = "${data.aws_ecr_repository.fleet.repository_url}:loadtest-${local.loadtest_tag}-${split(":", data.docker_registry_image.dockerhub.sha256_digest)[1]}"
keep_remotely = true
build {
context = "../docker/"
dockerfile = "loadtest.Dockerfile"
platform = "linux/amd64"
build_args = {
TAG = local.loadtest_tag
}
pull_parent = true
}
}

View file

@ -0,0 +1,7 @@
locals {
# Must match the naming of the infra deployment so shared resources
# (ECR repo, remote-state outputs) resolve for this workspace.
customer = "fleet-${terraform.workspace}"
loadtest_containers = var.loadtest_containers
fleet_image = var.tag
# Image-build ref: prefer the git branch when set (tag and branch may
# differ), otherwise fall back to the docker tag.
loadtest_tag = var.git_branch != null ? var.git_branch : var.tag
}

View file

@ -0,0 +1,23 @@
data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
# Repo metadata (points at the repository root relative to this module).
data "git_repository" "tf" {
path = "${path.module}/../../../../"
}
# Runs osquery-perf simulated hosts on the infra deployment's ECS cluster,
# wired entirely from that stack's remote-state outputs. Hosts enroll via
# the internal ALB over plain HTTP (private VPC traffic only).
module "osquery_perf" {
source = "github.com/fleetdm/fleet-terraform//addons/osquery-perf?ref=tf-mod-addon-osquery-perf-v1.2.0"
customer_prefix = local.customer
ecs_cluster = data.terraform_remote_state.infra.outputs.ecs_cluster
loadtest_containers = local.loadtest_containers
subnets = data.terraform_remote_state.shared.outputs.vpc.private_subnets
security_groups = data.terraform_remote_state.infra.outputs.security_groups
ecs_iam_role_arn = data.terraform_remote_state.infra.outputs.ecs_arn
ecs_execution_iam_role_arn = data.terraform_remote_state.infra.outputs.ecs_execution_arn
server_url = "http://${data.terraform_remote_state.infra.outputs.internal_alb_dns_name}"
osquery_perf_image = "${data.aws_ecr_repository.fleet.repository_url}:loadtest-${local.loadtest_tag}-${split(":", data.docker_registry_image.dockerhub.sha256_digest)[1]}"
extra_flags = var.extra_flags
logging_options = data.terraform_remote_state.infra.outputs.logging_config
enroll_secret_arn = data.terraform_remote_state.infra.outputs.enroll_secret_arn
}

View file

@ -0,0 +1,3 @@
# Expose the whole module for inspection / downstream remote-state reads.
output "osquery_perf" {
  value = module.osquery_perf
}

View file

@ -0,0 +1,84 @@
# Provider pins and remote state for the osquery-perf deployment.
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.68.0"
    }
    # Builds and pushes the loadtest image to ECR (see docker_registry_image).
    docker = {
      source  = "kreuzwerker/docker"
      version = "~> 2.16.0"
    }
    git = {
      source  = "paultyng/git"
      version = "~> 0.1.0"
    }
  }
  # Workspace-aware S3 state: objects land under
  # <workspace_key_prefix>/<workspace>/<key>, locked via DynamoDB and
  # encrypted with the loadtesting KMS key.
  backend "s3" {
    bucket               = "fleet-terraform-state20220408141538466600000002"
    key                  = "loadtesting/loadtesting/osqp/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
    workspace_key_prefix = "loadtesting"                                    # This should be set to the account alias
    region               = "us-east-2"
    encrypt              = true
    kms_key_id           = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
    dynamodb_table       = "tf-remote-state-lock"
    assume_role = {
      role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
    }
  }
}
# AWS provider pinned to us-east-2. default_tags stamps every resource
# with environment/provenance metadata, including the S3 path of the
# state object that manages it.
provider "aws" {
  region = "us-east-2"
  default_tags {
    tags = {
      environment = "loadtest"
      terraform   = "https://github.com/fleetdm/fleet/tree/main/infrastructure/loadtesting"
      # Mirrors the backend layout: <workspace_key_prefix>/<workspace>/<key>.
      state     = "s3://fleet-terraform-state20220408141538466600000002/loadtesting/${terraform.workspace}/loadtesting/loadtesting/osqp/terraform.tfstate"
      # Bare reference instead of an interpolation-only "${...}" string
      # (redundant quoting is flagged by terraform fmt/validate).
      workspace = terraform.workspace
    }
  }
}
# State of the matching loadtest infrastructure deployment (ECS cluster,
# security groups, IAM roles, ALB, logging, enroll secret). Read from the
# same workspace so osquery-perf targets the right environment.
data "terraform_remote_state" "infra" {
  backend   = "s3"
  workspace = terraform.workspace
  config = {
    bucket               = "fleet-terraform-state20220408141538466600000002"
    key                  = "loadtesting/loadtesting/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
    workspace_key_prefix = "loadtesting"                               # This should be set to the account alias
    region               = "us-east-2"
    encrypt              = true
    kms_key_id           = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
    dynamodb_table       = "tf-remote-state-lock"
    assume_role = {
      role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
    }
  }
}
# State of the shared (workspace-independent) loadtest resources; supplies
# the VPC whose private subnets host the osquery-perf tasks. No
# `workspace` attribute: always reads the default workspace's state.
data "terraform_remote_state" "shared" {
  backend = "s3"
  config = {
    bucket               = "fleet-terraform-state20220408141538466600000002"
    key                  = "loadtesting/loadtesting/shared/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
    workspace_key_prefix = "loadtesting"                                      # This should be set to the account alias
    region               = "us-east-2"
    encrypt              = true
    kms_key_id           = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
    dynamodb_table       = "tf-remote-state-lock"
    assume_role = {
      role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
    }
  }
}
# Docker provider authenticated against this account's ECR registry using
# the short-lived token from data.aws_ecr_authorization_token.token.
provider "docker" {
  # Configuration options
  registry_auth {
    address  = "${data.aws_caller_identity.current.account_id}.dkr.ecr.us-east-2.amazonaws.com"
    username = data.aws_ecr_authorization_token.token.user_name
    password = data.aws_ecr_authorization_token.token.password
  }
}
provider "git" {}

View file

@ -0,0 +1,23 @@
# Inputs supplied by the osquery-perf GitHub Actions workflow.

variable "tag" {
  description = "The tag to deploy. This would be the same as the branch name"
  type        = string
  default     = ""
}

variable "git_branch" {
  description = "The git branch to use to build loadtest containers. Only needed if docker tag doesn't match the git branch"
  type        = string
  # null (not "") so locals can distinguish "unset" via var.git_branch != null.
  default     = null
}

variable "loadtest_containers" {
  description = "Number of loadtest containers to deploy"
  type        = number
  default     = 1
}

variable "extra_flags" {
  # Fixed description: the type is list(string) (one element per flag
  # token, as the default shows), not a comma-delimited string.
  description = "Extra flags to pass to osquery-perf containers, one list element per argument (e.g. [\"--orbit_prob\", \"0.0\"])"
  type        = list(string)
  default     = ["--orbit_prob", "0.0"]
}

View file

@ -13,13 +13,13 @@ module "s3_bucket_for_logs" {
attach_require_latest_tls_policy = true
# attach_policy = var.extra_s3_log_policies != []
# policy = var.extra_s3_log_policies != [] ? data.aws_iam_policy_document.s3_log_bucket[0].json : null
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
acl = "private"
control_object_ownership = true
object_ownership = "ObjectWriter"
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
acl = "private"
control_object_ownership = true
object_ownership = "ObjectWriter"
server_side_encryption_configuration = {
rule = {
@ -68,10 +68,10 @@ module "athena-s3-bucket" {
attach_require_latest_tls_policy = true
# attach_policy = var.extra_s3_athena_policies != []
# policy = var.extra_s3_athena_policies != [] ? data.aws_iam_policy_document.s3_athena_bucket[0].json : null
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
server_side_encryption_configuration = {
rule = {
bucket_key_enabled = true
@ -110,7 +110,7 @@ resource "aws_athena_database" "logs" {
}
resource "aws_athena_workgroup" "logs" {
name = "fleet-loadtesting-logs"
name = "fleet-loadtesting-logs"
configuration {
enforce_workgroup_configuration = true

View file

@ -0,0 +1,106 @@
# TLS certificate chain of GitHub's OIDC issuer, used to derive the
# thumbprint registered with the IAM OIDC provider below.
data "tls_certificate" "github" {
  url = "https://token.actions.githubusercontent.com/.well-known/openid-configuration"
}
/*
It's possible to use the following to add Github as an OpenID Connect Provider and integrate
Github Actions as your CI/CD mechanism.
*/
resource "aws_iam_openid_connect_provider" "github" {
  url = "https://token.actions.githubusercontent.com"
  # Audience GitHub Actions requests when assuming an AWS role.
  client_id_list = [
    "sts.amazonaws.com",
  ]
  thumbprint_list = [
    data.tls_certificate.github.certificates[0].sha1_fingerprint
  ]
}
# Role assumed by GitHub Actions via OIDC (trust policy: gha_assume_role).
resource "aws_iam_role" "gha_role" {
  name               = "github-actions-role"
  assume_role_policy = data.aws_iam_policy_document.gha_assume_role.json
}

# Inline permissions granted to the workflows (see gha-permissions below).
resource "aws_iam_role_policy" "gha_role_policy" {
  policy = data.aws_iam_policy_document.gha-permissions.json
  role   = aws_iam_role.gha_role.id
}
#####################
# AssumeRole
#
# Allow sts:AssumeRoleWithWebIdentity from GitHub via OIDC
# Customize your repository
#####################
data "aws_iam_policy_document" "gha_assume_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRoleWithWebIdentity"]
    principals {
      type = "Federated"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/token.actions.githubusercontent.com"
      ]
    }
    # Trust is scoped to this repository only, but to ANY ref/environment
    # within it (the trailing "*" matches branches, tags, and PRs).
    condition {
      test     = "StringLike"
      variable = "token.actions.githubusercontent.com:sub"
      values   = ["repo:fleetdm/fleet:*"]
    }
    # Token must be minted for the STS audience (matches client_id_list
    # on the OIDC provider).
    condition {
      test     = "StringEquals"
      variable = "token.actions.githubusercontent.com:aud"
      values   = ["sts.amazonaws.com"]
    }
  }
}
// Customize the permissions for your deployment
# NOTE(review): this grant is effectively administrator-level ("iam:*",
# "ec2:*", etc. on resources "*"). Acceptable only because the role is
# confined to the dedicated loadtesting account via the assume-role
# trust above; consider narrowing actions/resources over time.
data "aws_iam_policy_document" "gha-permissions" {
  statement {
    effect = "Allow"
    actions = [
      "ec2:*",
      "cloudwatch:*",
      "s3:*",
      "lambda:*",
      "ecs:*",
      "rds:*",
      "rds-data:*",
      "secretsmanager:*",
      "pi:*",
      "ecr:*",
      "iam:*",
      "aps:*",
      # NOTE(review): "vpc" does not appear to be an IAM service prefix —
      # VPC actions live under "ec2:" — so this entry is likely a no-op;
      # confirm and consider removing.
      "vpc:*",
      "kms:*",
      "elasticloadbalancing:*",
      "ce:*",
      "cur:*",
      "logs:*",
      "cloudformation:*",
      "ssm:*",
      "sns:*",
      "elasticache:*",
      "application-autoscaling:*",
      "acm:*",
      "route53:*",
      "dynamodb:*",
      "kinesis:*",
      "firehose:*",
      "athena:*",
      "glue:*",
      "ses:*",
      "wafv2:*",
      "events:*",
      "cloudfront:*",
      "backup:*",
      "backup-storage:*"
    ]
    resources = ["*"]
  }
}