diff --git a/.github/actions/action-infrastructure-stack/action.yaml b/.github/actions/action-infrastructure-stack/action.yaml index cc1c2f9d..4320b1e3 100644 --- a/.github/actions/action-infrastructure-stack/action.yaml +++ b/.github/actions/action-infrastructure-stack/action.yaml @@ -28,6 +28,9 @@ inputs: mgmt_account_id: description: "The management account ID for the action" required: true + account_id_dev: + description: "AWS dev account ID" + required: false release_tag: description: "The release tag identifying the timeline in the repository to deploy from" required: false @@ -59,6 +62,7 @@ runs: APPLICATION_TAG: ${{ inputs.application_tag }} RELEASE_TAG: ${{ inputs.release_tag }} COMMIT_HASH: ${{ inputs.commit_hash }} + AWS_ACCOUNT_ID_DEV: ${{ inputs.account_id_dev }} id: "action_stack" shell: bash run: | diff --git a/.github/actions/artefact-cleardown/action.yaml b/.github/actions/artefact-cleardown/action.yaml new file mode 100644 index 00000000..85f8b776 --- /dev/null +++ b/.github/actions/artefact-cleardown/action.yaml @@ -0,0 +1,20 @@ +name: "Cleardown redundant artefacts action" +description: "Delete the redundant artefacts" +inputs: + workspace: + description: "The name of the workspace to action the infrastructure into." + required: true + artefact_bucket_name: + description: "The name of the s3 bucket holding domain artefacts" + required: true + +runs: + using: composite + steps: + - name: Delete artefacts + id: delete_artefacts + shell: bash + run: | + export WORKSPACE=${{inputs.workspace}} + export ARTEFACT_BUCKET_NAME=${{inputs.artefact_bucket_name}} + ./scripts/workflow/cleardown-artefacts.sh diff --git a/.github/actions/check-tf-state/action.yaml b/.github/actions/check-tf-state/action.yaml new file mode 100644 index 00000000..bbc8cfb1 --- /dev/null +++ b/.github/actions/check-tf-state/action.yaml @@ -0,0 +1,20 @@ +name: "Check terraform state cleardown action" +description: "Check deletion of terraform state" +inputs: + workspace: + description: "The name of the workspace to check." + required: true + environment: + description: "The name of the environment to action the infrastructure into." 
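The artefact-cleardown action above hands off to `scripts/workflow/cleardown-artefacts.sh`, which this diff does not include. A minimal sketch of what such a script might do, assuming artefacts are stored under a per-workspace prefix in the bucket (the prefix layout is an assumption, not taken from the repository):

```bash
#!/usr/bin/env bash
# Hypothetical sketch of scripts/workflow/cleardown-artefacts.sh (not in this diff).
# Assumes artefacts for a workspace live under s3://<bucket>/<workspace>/.
set -euo pipefail
: "${WORKSPACE:?WORKSPACE must be set}"
: "${ARTEFACT_BUCKET_NAME:?ARTEFACT_BUCKET_NAME must be set}"

# Delete every artefact object belonging to this workspace.
aws s3 rm "s3://${ARTEFACT_BUCKET_NAME}/${WORKSPACE}/" --recursive
echo "Deleted artefacts for workspace '${WORKSPACE}' from '${ARTEFACT_BUCKET_NAME}'"
```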
+ required: true + +runs: + using: composite + steps: + - name: Check terraform state + id: check_tf_state + shell: bash + run: | + export WORKSPACE=${{inputs.workspace}} + export ENVIRONMENT=${{inputs.environment}} + ./scripts/workflow/check-terraform-state.sh diff --git a/.github/actions/configure-credentials/action.yaml b/.github/actions/configure-credentials/action.yaml index f487f226..c0c34fef 100644 --- a/.github/actions/configure-credentials/action.yaml +++ b/.github/actions/configure-credentials/action.yaml @@ -23,6 +23,6 @@ runs: - name: configure aws credentials uses: aws-actions/configure-aws-credentials@v5.1.1 with: - role-to-assume: arn:aws:iam::${{ inputs.aws_account_id }}:role/saet-triage-api-dev-account-github-runner + role-to-assume: arn:aws:iam::${{ inputs.aws_account_id }}:role/${{ github.event.repository.name }}${{ inputs.environment != 'mgmt' && format('-{0}', inputs.environment) || '' }}-${{ inputs.type }}-github-runner role-session-name: GitHub_to_AWS_via_FederatedOIDC aws-region: ${{ inputs.aws_region }} diff --git a/.github/actions/derive-workspace/action.yaml b/.github/actions/derive-workspace/action.yaml new file mode 100644 index 00000000..518bce45 --- /dev/null +++ b/.github/actions/derive-workspace/action.yaml @@ -0,0 +1,24 @@ +name: "Derive Workspace action" +description: "Derives the name of the workspace for subsequent actions to run against" + +outputs: + workspace: + description: "The derived workspace name" + value: ${{ steps.derive-workspace.outputs.workspace }} + +runs: + using: "composite" + steps: + - name: "Derive workspace" + id: "derive-workspace" + shell: bash + run: | + export TRIGGER=${{ github.ref_type }} + export TRIGGER_ACTION=${{ github.event_name }} + export TRIGGER_REFERENCE=${{ github.ref_name }} + export TRIGGER_HEAD_REFERENCE=${{ github.head_ref }} + export TRIGGER_EVENT_REF=${{ github.event.ref }} + export COMMIT_HASH=$(git rev-parse --short $GITHUB_SHA) + .
scripts/workflow/derive-workspace.sh + echo "Workspace Name: ${WORKSPACE}" + echo "workspace=${WORKSPACE}" >> $GITHUB_OUTPUT diff --git a/.github/actions/perform-static-analysis/action.yaml b/.github/actions/perform-static-analysis/action.yaml new file mode 100644 index 00000000..ec538dee --- /dev/null +++ b/.github/actions/perform-static-analysis/action.yaml @@ -0,0 +1,42 @@ +name: "Run SonarCloud static analysis" +description: "Perform SonarCloud static analysis" + +inputs: + sonar_organisation_key: + description: "Sonar organisation key, used to identify the project" + required: false + sonar_project_key: + description: "Sonar project key, used to identify the project" + required: false + sonar_token: + description: "Sonar token, the API key" + required: false + +runs: + using: "composite" + steps: + - name: "Download code coverage reports" + uses: actions/download-artifact@v4 + with: + path: coverage/ + pattern: coverage-*.xml + + - name: "Find coverage files" + id: coverage-files + shell: bash + run: | + FILES=$(find coverage -name 'coverage-*.xml' | paste -sd "," -) + echo "files=$FILES" >> $GITHUB_OUTPUT + + - name: "Perform SonarCloud static analysis" + uses: sonarsource/sonarqube-scan-action@v5.3.1 + env: + SONAR_TOKEN: ${{ inputs.sonar_token }} + with: + args: > + -Dsonar.organization=${{ inputs.sonar_organisation_key }} + -Dsonar.projectKey=${{ inputs.sonar_project_key }} + -Dsonar.branch.name=${{ github.ref_name }} + -Dsonar.python.coverage.reportPaths=${{ steps.coverage-files.outputs.files }} + -Dproject.settings=./scripts/config/sonar-scanner.properties + continue-on-error: true diff --git a/.github/workflows/artefacts-cleardown.yaml b/.github/workflows/artefacts-cleardown.yaml new file mode 100644 index 00000000..2204b116 --- /dev/null +++ b/.github/workflows/artefacts-cleardown.yaml @@ -0,0 +1,54 @@ +name: Cleardown Artefacts + +permissions: + id-token: write + contents: read +on: + workflow_call: + inputs: + environment: + description: "Defines the Github environment in which to pull environment variables from" + required: true + type: string + workspace: + description: "Name of the workspace" + required: true + type: string + workflow_timeout: + description: "Timeout duration in minutes" + required: false + default: 10 + type: number + artefact_bucket_name: + description: "The name of the s3 bucket holding domain artefacts" + required: true + type: string + type: + description: "The type of permissions (e.g., account, app)" + required: true + type: string + +jobs: + cleardown-artefacts: + name: "Cleardown redundant artefacts" + runs-on: ubuntu-latest + timeout-minutes: ${{ inputs.workflow_timeout }} + environment: ${{ inputs.environment }} + + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + + - name: "Configure AWS Credentials" + uses: ./.github/actions/configure-credentials + with: + aws_account_id: ${{ secrets.ACCOUNT_ID }} + aws_region: ${{ vars.AWS_REGION }} + type: ${{ inputs.type }} + environment: ${{ inputs.environment }} + + - name: "Cleardown redundant artefacts" + uses: ./.github/actions/artefact-cleardown + with: + workspace: ${{ inputs.workspace }} + artefact_bucket_name: ${{ inputs.artefact_bucket_name }} diff --git a/.github/workflows/build-project.yaml b/.github/workflows/build-project.yaml new file mode 100644 index 00000000..396f5666 --- /dev/null +++ b/.github/workflows/build-project.yaml @@ -0,0 +1,104 @@ +name: Build project workflow +run-name: Build ${{ inputs.build_type }} - ${{ inputs.name }}
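Stepping back to the derive-workspace action above: it sources `scripts/workflow/derive-workspace.sh`, which the PR does not show. Because the script is sourced, it can set WORKSPACE directly in the caller's shell; a sketch of the derivation it implies, where the branch-to-workspace rules are assumptions rather than the repository's actual logic:

```bash
#!/usr/bin/env bash
# Hypothetical sketch of scripts/workflow/derive-workspace.sh (not in this diff).
# Sourced by the composite action, so it sets WORKSPACE in the caller's shell.
# The mapping rules below are assumptions, not the repository's actual logic.

branch="${TRIGGER_HEAD_REFERENCE:-${TRIGGER_REFERENCE}}"  # prefer the PR head ref

case "${branch}" in
  main|develop)
    WORKSPACE="default" ;;
  task/*|dependabot/*)
    # Normalise the branch name into a safe, lower-case workspace identifier.
    WORKSPACE=$(echo "${branch#*/}" | tr '[:upper:]' '[:lower:]' | tr -c 'a-z0-9\n' '-') ;;
  *)
    WORKSPACE="${COMMIT_HASH}" ;;
esac

export WORKSPACE
```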
+ +permissions: + id-token: write + contents: read +on: + workflow_call: + inputs: + build_type: + description: "The type of project to build (service, package)" + required: true + type: string + name: + description: "The name of the package to build" + required: true + type: string + python_version: + description: "The version of Python" + required: true + type: string + commit_hash: + description: "The commit hash, set by the CI/CD pipeline workflow" + required: false + type: string + environment: + description: "The deployment environment" + required: true + type: string + repo_name: + description: "The name of the Git repo" + required: true + type: string + workspace: + description: "The name of the workspace to deploy the infrastructure into" + required: true + type: string + application_tag: + description: "The application tag identifying the timeline in the repository to deploy from" + required: false + type: string + type: + description: "The type of permissions (e.g., account, app)" + required: true + type: string + release_build: + description: "Flag to indicate if this is a release build" + required: false + type: boolean + default: false + +jobs: + build-project: + name: "Build ${{ inputs.build_type }} - ${{ inputs.name }}" + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + + - name: "Configure AWS Credentials" + uses: ./.github/actions/configure-credentials + with: + aws_account_id: ${{ secrets.ACCOUNT_ID }} + aws_region: ${{ vars.AWS_REGION }} + type: ${{ inputs.type }} + environment: ${{ inputs.environment }} + + - name: "Set up Python" + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python_version }} + + - name: "Build project" + run: make build + env: + SERVICE: ${{ inputs.name }} + PACKAGE: ${{ inputs.name }} + COMMIT_HASH: ${{ inputs.commit_hash }} + ENVIRONMENT: ${{ inputs.environment }} + REPO_NAME: ${{ inputs.repo_name }} + WORKSPACE: ${{ inputs.workspace }} + APPLICATION_TAG: ${{ inputs.application_tag }} + RELEASE_BUILD: ${{ inputs.release_build }} + + - name: "Publish artefacts to S3" + run: make publish + env: + SERVICE: ${{ inputs.name }} + PACKAGE: ${{ inputs.name }} + COMMIT_HASH: ${{ inputs.commit_hash }} + ENVIRONMENT: ${{ inputs.environment }} + REPO_NAME: ${{ inputs.repo_name }} + WORKSPACE: ${{ inputs.workspace }} + APPLICATION_TAG: ${{ inputs.application_tag }} + RELEASE_BUILD: ${{ inputs.release_build }} + + - name: "Publish artefacts to GitHub" + uses: actions/upload-artifact@v6 + with: + name: ${{ inputs.name }}-${{ inputs.build_type }}-artefacts + path: src/lambda_function.zip + if-no-files-found: error diff --git a/.github/workflows/deploy-application-infrastructure.yaml b/.github/workflows/deploy-application-infrastructure.yaml new file mode 100644 index 00000000..25b440c5 --- /dev/null +++ b/.github/workflows/deploy-application-infrastructure.yaml @@ -0,0 +1,165 @@ +name: Deploy application infrastructure workflow + +permissions: + id-token: write + contents: read +on: + workflow_call: + inputs: + environment: + description: "The name of the environment to deploy the infrastructure into" + required: true + type: string + workspace: + description: "The name of the workspace to deploy the infrastructure into" + required: true + default: "default" + type: string + project: + description: "The project - eg dos or cm." 
+ required: false + default: "dos" + type: string + tag: + description: "The git tag identifying the timeline in the repository to deploy from" + required: false + type: string + release_tag: + description: "The release tag identifying the timeline in the repository to deploy from" + required: false + type: string + application_tag: + description: "The application tag identifying the timeline in the repository to deploy from" + required: false + type: string + workflow_timeout: + description: "Timeout duration in minutes" + required: false + default: 5 + type: number + commit_hash: + description: "The commit hash, set by the CI/CD pipeline workflow" + required: false + type: string + type: + description: "The type of permissions (e.g., account, app)" + required: false + default: "app" + type: string + stacks: + description: "A list of the infrastructure stacks to deploy from the domain. If not supplied, no infrastructure will be deployed" + required: false + default: "['triage']" + type: string + secrets: + ACCOUNT_ID: + description: "AWS account ID for credentials" + required: true + MGMT_ACCOUNT_ID: + description: "AWS management account ID for credentials" + required: true + AWS_ACCOUNT_ID_DEV: + description: "AWS dev account ID for credentials" + required: true + outputs: + plan_result: + description: "The Terraform plan output" + value: ${{ jobs.plan-application-infrastructure.outputs.plan_result }} + deploy_status: + description: "The status of the deployment" + value: ${{ jobs.deploy_summary.outputs.deploy_status }} + + +jobs: + plan-application-infrastructure: + name: "Plan application infrastructure deployment to ${{ inputs.environment }}" + concurrency: + group: "${{ inputs.environment }}-${{ inputs.tag || inputs.workspace}}" + cancel-in-progress: false + uses: ./.github/workflows/deploy-infrastructure.yaml + with: + environment: ${{ inputs.environment }} + workspace: ${{ inputs.workspace }} + stacks: ${{ inputs.stacks }} + application_tag: ${{ inputs.application_tag }} + commit_hash: ${{ inputs.commit_hash }} + tag: ${{ inputs.tag }} + release_tag: ${{ inputs.release_tag }} + action: plan + type: ${{ inputs.type }} + workflow_timeout: ${{ inputs.workflow_timeout }} + secrets: + ACCOUNT_ID: ${{ secrets.ACCOUNT_ID }} + MGMT_ACCOUNT_ID: ${{ secrets.MGMT_ACCOUNT_ID }} + AWS_ACCOUNT_ID_DEV: ${{ secrets.AWS_ACCOUNT_ID_DEV }} + + manual-approval-application-infra: + name: "Manual approval for deployment of application infrastructure to the ${{ inputs.environment }} environment" + if: ${{ needs.plan-application-infrastructure.outputs.plan_result == 'true' }} + needs: + - plan-application-infrastructure + runs-on: ubuntu-latest + environment: "${{ inputs.environment }}-protected" + outputs: + approved: ${{ steps.log-approval.outputs.approved }} + steps: + - name: Approval required + run: echo "${{ inputs.environment }} deployment paused for manual approval. Please approve in the Actions tab."
+ - name: Log approval + id: log-approval + run: echo "approved=true" >> $GITHUB_OUTPUT + + apply-application-infrastructure: + name: "Apply application infrastructure deployment to ${{ inputs.environment }}" + concurrency: + group: "${{ inputs.environment }}-${{ inputs.tag || inputs.workspace}}" + cancel-in-progress: false + needs: + - manual-approval-application-infra + uses: ./.github/workflows/deploy-infrastructure.yaml + with: + environment: ${{ inputs.environment }} + workspace: ${{ inputs.workspace }} + stacks: ${{ inputs.stacks }} + application_tag: ${{ inputs.application_tag }} + commit_hash: ${{ inputs.commit_hash }} + tag: ${{ inputs.tag }} + release_tag: ${{ inputs.release_tag }} + action: apply + type: ${{ inputs.type }} + workflow_timeout: ${{ inputs.workflow_timeout }} + secrets: inherit + + deploy_summary: + name: "Summarise deployment of application infrastructure to ${{ inputs.environment }} environment" + needs: + - plan-application-infrastructure + - manual-approval-application-infra + - apply-application-infrastructure + if: always() && !cancelled() + runs-on: ubuntu-latest + outputs: + deploy_status: ${{ steps.deployment_summary.outputs.deploy_status }} + steps: + - name: Deployment Summary + id: deployment_summary + run: | + if [ ${{ needs.manual-approval-application-infra.result }} == 'skipped' ]; then + echo "Plan identified no changes for application infrastructure in the ${{ inputs.environment }} environment." + echo "deploy_status=Success" >> $GITHUB_OUTPUT + else + if [ ${{ needs.plan-application-infrastructure.outputs.plan_result }} == 'true' ]; then + if [ ${{ needs.manual-approval-application-infra.outputs.approved }} == 'true' ]; then + if [ "${{ needs.apply-application-infrastructure.result }}" == "success" ]; then + echo "Changes APPROVED and successfully APPLIED to application infrastructure in the ${{ inputs.environment }} environment." + echo "deploy_status=Success" >> $GITHUB_OUTPUT + else + echo "Changes APPROVED but were NOT successfully applied to application infrastructure in the ${{ inputs.environment }} environment." + echo "deploy_status=Failed" >> $GITHUB_OUTPUT + fi + else + echo "Planned changes to application infrastructure REJECTED for deployment to the ${{ inputs.environment }} environment." 
+ echo "deploy_status=Rejected" >> $GITHUB_OUTPUT + fi + fi + fi diff --git a/.github/workflows/deploy-data-migration-project.yaml b/.github/workflows/deploy-data-migration-project.yaml new file mode 100644 index 00000000..305f93bd --- /dev/null +++ b/.github/workflows/deploy-data-migration-project.yaml @@ -0,0 +1,88 @@ +name: Deploy data migration project workflow +run-name: Deploy ${{ inputs.build_type }} - ${{ inputs.name }} +permissions: + id-token: write + contents: read +on: + workflow_call: + inputs: + build_type: + description: "The type of project to build (service, package)" + required: true + type: string + name: + description: "The name of the package to build" + required: true + type: string + python_version: + description: "The version of Python" + required: true + type: string + commit_hash: + description: "The commit hash, set by the CI/CD pipeline workflow" + required: false + type: string + environment: + description: "The deployment environment" + required: true + type: string + repo_name: + description: "The name of the Git repo" + required: true + type: string + workspace: + description: "The name of the workspace to deploy the infrastructure into" + required: true + type: string + application_tag: + description: "The application tag identifying the timeline in the repository to deploy from" + required: false + type: string + type: + description: "The type of permissions (e.g., account, app)" + required: true + type: string + +jobs: + deploy-project: + name: "Deploy step for ${{ inputs.build_type }} - ${{ inputs.name }}" + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + + - name: "Configure AWS Credentials" + uses: ./.github/actions/configure-credentials + with: + aws_account_id: ${{ secrets.ACCOUNT_ID }} + aws_region: ${{ vars.AWS_REGION }} + type: ${{ inputs.type }} + environment: ${{ inputs.environment }} + + - name: "Get directories" + id: get-directories + run: | + if [[ "${{ inputs.build_type }}" == "service" ]]; then + echo "working_directory=services/${{ inputs.name }}" >> $GITHUB_OUTPUT + echo "build_directory=build/services/${{ inputs.name }}" >> $GITHUB_OUTPUT + elif [[ "${{ inputs.build_type }}" == "package" ]]; then + echo "working_directory=application/packages/${{ inputs.name }}" >> $GITHUB_OUTPUT + echo "build_directory=build/packages/${{ inputs.name }}" >> $GITHUB_OUTPUT + else + echo "Invalid build type: ${{ inputs.build_type }}" + exit 1 + fi + + - name: "Invoke DMS Replication Tasks" + run: make invoke-replication-task + working-directory: ${{ steps.get-directories.outputs.working_directory }} + env: + SERVICE: ${{ inputs.name }} + PACKAGE: ${{ inputs.name }} + COMMIT_HASH: ${{ inputs.commit_hash }} + ENVIRONMENT: ${{ inputs.environment }} + REPO_NAME: ${{ inputs.repo_name }} + WORKSPACE: ${{ inputs.workspace }} + APPLICATION_TAG: ${{ inputs.application_tag }} diff --git a/.github/workflows/deploy-infrastructure.yaml b/.github/workflows/deploy-infrastructure.yaml index a61f32a2..72723f54 100644 --- a/.github/workflows/deploy-infrastructure.yaml +++ b/.github/workflows/deploy-infrastructure.yaml @@ -59,6 +59,16 @@ on: description: "The type of permissions (e.g., account, app)" required: true type: string + secrets: + ACCOUNT_ID: + description: "AWS account ID for credentials" + required: true + MGMT_ACCOUNT_ID: + description: "AWS management account ID for credentials" + required: true + AWS_ACCOUNT_ID_DEV: + description: "AWS dev account ID for credentials" + 
required: true outputs: plan_result: description: "The Terraform plan output" @@ -110,6 +120,7 @@ jobs: release_tag: ${{ inputs.release_tag }} commit_hash: ${{ inputs.commit_hash }} mgmt_account_id: ${{ secrets.MGMT_ACCOUNT_ID }} + account_id_dev: ${{ secrets.AWS_ACCOUNT_ID_DEV }} - name: "Upload Terraform Plan Artifact" uses: actions/upload-artifact@v6 diff --git a/.github/workflows/infrastructure-cleardown.yaml b/.github/workflows/infrastructure-cleardown.yaml new file mode 100644 index 00000000..5cd6d237 --- /dev/null +++ b/.github/workflows/infrastructure-cleardown.yaml @@ -0,0 +1,124 @@ +name: Cleardown Infrastructure + +permissions: + id-token: write + contents: read +on: + workflow_call: + inputs: + environment: + description: "Defines the Github environment in which to pull environment variables from" + required: true + type: string + workspace: + description: "Name of the workspace" + required: true + type: string + project: + description: "The project - eg dos or cm." + required: false + default: "dos" + type: string + stacks: + description: "Name of the stacks" + required: true + type: string + tag: + description: "Name of the tag" + required: false + type: string + workflow_timeout: + description: "Timeout duration in minutes" + required: false + default: 10 + type: number + application_tag: + description: "The application tag identifying the timeline in the repository to deploy from" + required: false + type: string + commit_hash: + description: "The commit hash, set by the CI/CD pipeline workflow" + required: false + type: string + type: + description: "The type of permissions (e.g., account, app)" + required: true + type: string + +jobs: + destroy-application-infrastructure: + uses: ./.github/workflows/deploy-infrastructure.yaml + with: + environment: ${{ inputs.environment }} + workspace: ${{ inputs.workspace }} + project: ${{ inputs.project }} + stacks: ${{ inputs.stacks }} + tag: ${{ inputs.tag }} + workflow_timeout: ${{ inputs.workflow_timeout }} + application_tag: ${{ inputs.application_tag }} + commit_hash: ${{ inputs.commit_hash }} + action: destroy + type: ${{ inputs.type }} + secrets: inherit + + delete-tf-state: + name: "Delete terraform state file" + runs-on: ubuntu-latest + timeout-minutes: ${{ inputs.workflow_timeout }} + environment: ${{ inputs.environment }} + strategy: + matrix: + stack: ${{ fromJSON(inputs.stacks) }} + needs: + - destroy-application-infrastructure + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + ref: ${{ inputs.tag }} + + - name: "Configure AWS Credentials" + uses: ./.github/actions/configure-credentials + with: + aws_account_id: ${{ secrets.ACCOUNT_ID }} + aws_region: ${{ vars.AWS_REGION }} + type: ${{ inputs.type }} + environment: ${{ inputs.environment }} + + - name: "Delete terraform state file" + id: delete_tf_state + uses: ./.github/actions/cleardown-tf-state + with: + workspace: ${{ inputs.workspace }} + environment: ${{ inputs.environment }} + stack: ${{ matrix.stack }} + + check-tf-state: + name: "Check state files cleared down" + runs-on: ubuntu-latest + timeout-minutes: ${{ inputs.workflow_timeout }} + environment: ${{ inputs.environment }} + + needs: + - delete-tf-state + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + ref: ${{ inputs.tag }} + + - name: "Configure AWS Credentials" + uses: ./.github/actions/configure-credentials + with: + aws_account_id: ${{ secrets.ACCOUNT_ID }} + aws_region: ${{ vars.AWS_REGION }} + type: ${{ inputs.type }} + environment: ${{ inputs.environment }} + 
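The check step that follows calls the check-tf-state action from earlier in this diff, which in turn shells out to `scripts/workflow/check-terraform-state.sh`, also absent from the PR. A plausible sketch, assuming remote state sits in an S3 backend where non-default workspaces are keyed under the backend's standard `env:/<workspace>/` prefix (the bucket naming convention below is an assumption):

```bash
#!/usr/bin/env bash
# Hypothetical sketch of scripts/workflow/check-terraform-state.sh (not in this diff).
# Assumes an S3 backend in which non-default workspace state is stored under the
# backend's default "env:/<workspace>/" key prefix. The bucket name is illustrative.
set -euo pipefail
: "${WORKSPACE:?WORKSPACE must be set}"
: "${ENVIRONMENT:?ENVIRONMENT must be set}"

BUCKET="${TF_STATE_BUCKET:-example-${ENVIRONMENT}-terraform-state}"  # assumed name
PREFIX="env:/${WORKSPACE}/"

# Fail if any state objects survive the cleardown.
COUNT=$(aws s3api list-objects-v2 --bucket "${BUCKET}" --prefix "${PREFIX}" \
  --query 'KeyCount' --output text)
if [ "${COUNT}" != "0" ]; then
  echo "ERROR: ${COUNT} state object(s) remain under s3://${BUCKET}/${PREFIX}"
  exit 1
fi
echo "State cleared down for workspace '${WORKSPACE}' in '${ENVIRONMENT}'"
```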
+ - name: "Check terraform state file" + id: check_tf_state + uses: ./.github/actions/check-tf-state + with: + workspace: ${{ inputs.workspace }} + environment: ${{ inputs.environment }} diff --git a/.github/workflows/metadata.yaml b/.github/workflows/metadata.yaml index dd1b6ce9..9a31de8f 100644 --- a/.github/workflows/metadata.yaml +++ b/.github/workflows/metadata.yaml @@ -22,9 +22,9 @@ on: reponame: description: "The name of the code repo" value: ${{ jobs.echo-metadata.outputs.reponame }} - # workspace: - # description: "The name of the workspace that we interacting with" - # value: ${{ jobs.derive-workspace.outputs.workspace }} + workspace: + description: "The name of the workspace that we are interacting with" + value: ${{ jobs.derive-workspace.outputs.workspace }} artefact_bucket_name: description: "The s3 bucket for domain artefacts" value: ${{ jobs.echo-metadata.outputs.artefact_bucket_name }} @@ -96,21 +96,21 @@ jobs: id: get_metadata uses: ./.github/actions/metadata - # derive-workspace: - # name: "Derive workspace" - # runs-on: ubuntu-latest - # timeout-minutes: ${{ inputs.workflow_timeout }} - # outputs: - # workspace: ${{ steps.derive_workspace.outputs.workspace }} - # steps: - # - name: "Checkout code" - # uses: actions/checkout@v6 - # with: - # ref: ${{ inputs.tag }} + derive-workspace: + name: "Derive workspace" + runs-on: ubuntu-latest + timeout-minutes: ${{ inputs.workflow_timeout }} + outputs: + workspace: ${{ steps.derive_workspace.outputs.workspace }} + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + ref: ${{ inputs.tag }} - # - name: "Derive workspace" - # id: derive_workspace - # uses: ./.github/actions/derive-workspace + - name: "Derive workspace" + id: derive_workspace + uses: ./.github/actions/derive-workspace set-environment: name: "Set environment" diff --git a/.github/workflows/pipeline-deploy-account-infrastructure.yaml b/.github/workflows/pipeline-deploy-account-infrastructure.yaml index 76c9420a..d4b47d08 100644 --- a/.github/workflows/pipeline-deploy-account-infrastructure.yaml +++ b/.github/workflows/pipeline-deploy-account-infrastructure.yaml @@ -1,4 +1,4 @@ -name: Pipeline Deploy Account Level Infrastructure Pipeline +name: Pipeline Deploy Account Level Infrastructure permissions: id-token: write @@ -9,12 +9,15 @@ on: branches: - "main" - "task/**" + - "develop" paths: - "infrastructure/stacks/terraform_management/**" - "infrastructure/modules/dynamodb/**" - "infrastructure/modules/s3/**" - "infrastructure/stacks/account_security/**" - "infrastructure/modules/shield/**" + - "infrastructure/stacks/account_wide/**" + - "infrastructure/stacks/artefact_management/**" workflow_run: workflows: ["Pipeline Deploy Policies Infrastructure"] types: @@ -45,6 +48,16 @@ on: description: "Deployment environment" required: true type: string + secrets: + ACCOUNT_ID: + description: "AWS account ID for credentials" + required: true + MGMT_ACCOUNT_ID: + description: "Management AWS account ID for credentials" + required: true + AWS_ACCOUNT_ID_DEV: + description: "AWS dev account ID" + required: true concurrency: group: account-infrastructure-${{ github.ref }} @@ -65,10 +78,13 @@ jobs: tag: ${{ inputs.tag }} environment: ${{ needs.metadata.outputs.environment }} workspace: "default" - stacks: "['terraform_management','account_security']" + stacks: "['terraform_management','account_security','account_wide','artefact_management']" type: account build_timestamp: ${{ needs.metadata.outputs.build_timestamp }} - secrets:
inherit + secrets: + ACCOUNT_ID: ${{ secrets.ACCOUNT_ID }} + MGMT_ACCOUNT_ID: ${{ secrets.MGMT_ACCOUNT_ID }} + AWS_ACCOUNT_ID_DEV: ${{ secrets.AWS_ACCOUNT_ID_DEV }} plan-infrastructure: name: "Plan ${{ matrix.name }} infrastructure deployment for ${{ matrix.environment }}" @@ -87,10 +103,10 @@ jobs: stacks: "['account_security']" - name: "env" environment: ${{ needs.metadata.outputs.environment }} - stacks: "['terraform_management','account_wide','domain_name']" + stacks: "['terraform_management','account_wide']" - name: "mgmt" environment: ${{ needs.metadata.outputs.mgmt_environment }} - stacks: "['terraform_management','account_security']" + stacks: "['terraform_management','account_security','artefact_management']" uses: ./.github/workflows/deploy-infrastructure.yaml with: environment: ${{ matrix.environment }} @@ -99,7 +115,10 @@ jobs: stacks: ${{ matrix.stacks }} action: plan type: account - secrets: inherit + secrets: + ACCOUNT_ID: ${{ secrets.ACCOUNT_ID }} + MGMT_ACCOUNT_ID: ${{ secrets.MGMT_ACCOUNT_ID }} + AWS_ACCOUNT_ID_DEV: ${{ secrets.AWS_ACCOUNT_ID_DEV }} manual-approval: name: "Manual approval for ${{ needs.metadata.outputs.environment }} infrastructure deployment" @@ -118,7 +137,7 @@ jobs: concurrency: group: "${{ matrix.environment }}-default-${{ matrix.name }}-${{matrix.stacks}}" cancel-in-progress: false - if: github.ref == 'refs/heads/main' + if: ${{ github.ref == 'refs/heads/main' }} needs: - metadata - manual-approval @@ -131,7 +150,9 @@ jobs: stacks: "['account_security']" - name: "env" environment: ${{ needs.metadata.outputs.environment }} - stacks: "['terraform_management']" + - name: "mgmt" + environment: ${{ needs.metadata.outputs.mgmt_environment }} + stacks: "['terraform_management','account_security','artefact_management']" uses: ./.github/workflows/deploy-infrastructure.yaml with: environment: ${{ matrix.environment }} @@ -140,4 +161,7 @@ jobs: stacks: ${{ matrix.stacks }} action: apply type: account - secrets: inherit + secrets: + ACCOUNT_ID: ${{ secrets.ACCOUNT_ID }} + MGMT_ACCOUNT_ID: ${{ secrets.MGMT_ACCOUNT_ID }} + AWS_ACCOUNT_ID_DEV: ${{ secrets.AWS_ACCOUNT_ID_DEV }} diff --git a/.github/workflows/pipeline-deploy-application.yaml b/.github/workflows/pipeline-deploy-application.yaml index 4cc7028b..70f9a893 100644 --- a/.github/workflows/pipeline-deploy-application.yaml +++ b/.github/workflows/pipeline-deploy-application.yaml @@ -9,17 +9,18 @@ on: branches: - main - "task/**" + - "develop" - "dependabot/**" workflow_dispatch: # checkov:skip=CKV_GHA_7:Inputs reviewed and approved inputs: tag: description: "Specify the tag to be used for deployment" - required: true + required: false type: string application_tag: description: "Specify the application tag to be used for deployment" - required: true + required: false type: string environment: description: "Deployment environment" @@ -43,71 +44,43 @@ jobs: uses: ./.github/workflows/quality-checks.yaml with: environment: ${{ needs.metadata.outputs.environment }} - workspace: ${{ needs.metadata.outputs.workspace }} - stacks: "['database', 'crud_apis', 'data_migration', 'read_only_viewer', 'opensearch', 'etl_ods', 'dos_search', 'is_performance', 'ui']" + workspace: "default" + stacks: "['triage']" type: app build_timestamp: ${{ needs.metadata.outputs.build_timestamp }} - secrets: inherit - - build-services: - name: "Build ${{ matrix.name }}" - needs: - - metadata - - quality-checks - strategy: - matrix: - include: - - name: "python" - build_type: "package" - - name: "crud-apis" - build_type: "service" - - name: 
"data-migration" - build_type: "service" - - name: "read-only-viewer" - build_type: "service" - - name: "etl-ods" - build_type: "service" - - name: "dos-search" - build_type: "service" - - name: "dos-ui" - build_type: "service" - uses: ./.github/workflows/build-project.yaml - with: - name: ${{ matrix.name }} - build_type: ${{ matrix.build_type }} - python_version: ${{ needs.metadata.outputs.python_version }} - commit_hash: ${{ needs.metadata.outputs.commit_hash }} - environment: ${{ needs.metadata.outputs.mgmt_environment }} - repo_name: ${{ needs.metadata.outputs.reponame }} - workspace: ${{ needs.metadata.outputs.workspace }} - application_tag: ${{ inputs.application_tag || 'latest' }} - type: app - secrets: inherit - - build-sandbox-containers: - name: "Build container ${{ matrix.name }}" - needs: - - metadata - - quality-checks - strategy: - matrix: - include: - - name: "sandbox-dos-search" - uses: ./.github/workflows/build-sandbox-images.yaml - with: - name: ${{ matrix.name }} - commit_hash: ${{ needs.metadata.outputs.commit_hash }} - environment: ${{ needs.metadata.outputs.environment }} - application_tag: ${{ inputs.application_tag || 'latest' }} secrets: ACCOUNT_ID: ${{ secrets.ACCOUNT_ID }} - PROXYGEN_URL: ${{ secrets.PROXYGEN_URL }} + MGMT_ACCOUNT_ID: ${{ secrets.MGMT_ACCOUNT_ID }} + AWS_ACCOUNT_ID_DEV: ${{ secrets.AWS_ACCOUNT_ID_DEV }} + + # build-services: + # name: "Build ${{ matrix.name }}" + # needs: + # - metadata + # - quality-checks + # strategy: + # matrix: + # include: + # - name: "python" + # build_type: "package" + # uses: ./.github/workflows/build-project.yaml + # with: + # name: ${{ matrix.name }} + # build_type: ${{ matrix.build_type }} + # python_version: ${{ needs.metadata.outputs.python_version }} + # commit_hash: ${{ needs.metadata.outputs.commit_hash }} + # environment: ${{ needs.metadata.outputs.environment }} + # repo_name: ${{ needs.metadata.outputs.reponame }} + # workspace: ${{ needs.metadata.outputs.workspace }} + # application_tag: ${{ inputs.application_tag || 'latest' }} + # type: app + # secrets: inherit perform-code-analysis: name: "Perform static code analysis" needs: - metadata - - build-services + # - build-services uses: ./.github/workflows/static-code-analysis.yaml with: environment: ${{ needs.metadata.outputs.environment }} @@ -117,7 +90,7 @@ jobs: name: "Deploy application infrastructure to the ${{ needs.metadata.outputs.environment }} environment" needs: - metadata - - build-services + # - build-services - perform-code-analysis uses: ./.github/workflows/deploy-application-infrastructure.yaml with: @@ -127,195 +100,14 @@ jobs: tag: ${{ inputs.tag }} commit_hash: ${{ needs.metadata.outputs.commit_hash }} workflow_timeout: 30 - secrets: inherit - - deploy-proxy-to-apim: - name: "Deploy ${{ matrix.api_name }} API to Proxygen" - needs: - - metadata - - deploy-application-infrastructure - uses: ./.github/workflows/authenticate-and-deploy-to-apim.yaml - strategy: - matrix: - api_name: - - dos-search - # Add more API names here as needed - with: - environment: ${{ needs.metadata.outputs.environment }} - workspace: ${{ needs.metadata.outputs.workspace }} - api_name: ${{ matrix.api_name }} - apim_env: ${{ needs.metadata.outputs.environment == 'dev' && 'internal-dev' || needs.metadata.outputs.environment == 'test' && 'internal-qa' || needs.metadata.outputs.environment }} secrets: ACCOUNT_ID: ${{ secrets.ACCOUNT_ID }} - AWS_REGION: ${{ vars.AWS_REGION }} - PROXYGEN_URL: ${{ secrets.PROXYGEN_URL }} - - # TODO : FTRS-1899 Re-enable data migration step 
once ETL process is stable - # migrate-data: - # name: "Run ETL process in ${{ needs.metadata.outputs.environment }}" - # if: github.ref == 'refs/heads/main' && needs.metadata.outputs.environment != 'prod' - # concurrency: - # group: "${{ needs.metadata.outputs.environment }}-${{ needs.metadata.outputs.workspace }}" - # cancel-in-progress: false - # needs: - # - metadata - # - deploy-application-infrastructure - # uses: ./.github/workflows/migrate-data.yaml - # with: - # environment: ${{ needs.metadata.outputs.environment }} - # function_name: "ftrs-dos-${{ needs.metadata.outputs.environment }}-data-migration-queue-populator-lambda" - # queue_name: "ftrs-dos-${{ needs.metadata.outputs.environment }}-data-migration-dms-events" - # type: app - # secrets: inherit - - export-dynamodb-to-s3: - name: "Export DynamoDB tables to S3 in ${{ needs.metadata.outputs.environment }}" - if: github.ref == 'refs/heads/main' && needs.metadata.outputs.environment != 'prod' - concurrency: - group: "${{ needs.metadata.outputs.environment }}-${{ needs.metadata.outputs.workspace }}" - cancel-in-progress: false - needs: - - metadata - - deploy-application-infrastructure - # - migrate-data - uses: ./.github/workflows/manage-dynamodb-data.yaml - with: - environment: ${{ needs.metadata.outputs.environment }} - type: app - action: export - secrets: inherit - - restore-dynamodb-from-s3: - name: "Restore data from S3 to DynamoDB tables in ${{ needs.metadata.outputs.workspace }}" - if: needs.metadata.outputs.workspace != 'default' && needs.metadata.outputs.environment != 'prod' - needs: - - metadata - - deploy-application-infrastructure - uses: ./.github/workflows/manage-dynamodb-data.yaml - with: - environment: ${{ needs.metadata.outputs.environment }} - workspace: ${{ needs.metadata.outputs.workspace }} - action: import - type: app - secrets: inherit - - deploy-frontend-services: - name: "Deploy ${{ matrix.name }} to ${{ needs.metadata.outputs.environment }}" - concurrency: - group: "${{ needs.metadata.outputs.environment }}-${{ needs.metadata.outputs.workspace }}-${{ matrix.name }}" - cancel-in-progress: false - needs: - - metadata - - deploy-application-infrastructure - strategy: - matrix: - include: - - name: "dos-ui" - - name: "read-only-viewer" - uses: ./.github/workflows/deploy-frontend-project.yaml - with: - name: ${{ matrix.name }} - build_type: "service" - python_version: ${{ needs.metadata.outputs.python_version }} - commit_hash: ${{ needs.metadata.outputs.commit_hash }} - environment: ${{ needs.metadata.outputs.environment }} - repo_name: ${{ needs.metadata.outputs.reponame }} - workspace: ${{ needs.metadata.outputs.workspace }} - application_tag: ${{ inputs.application_tag || 'latest' }} - type: "app" - secrets: inherit - - deploy-data-migration-service: - name: "Deploy data migration service to ${{ needs.metadata.outputs.environment }}" - if: github.ref == 'refs/heads/main' && needs.metadata.outputs.environment != 'prod' - concurrency: - group: "${{ needs.metadata.outputs.environment }}-data-migration-${{ needs.metadata.outputs.workspace }}" - cancel-in-progress: false - needs: - - metadata - - deploy-application-infrastructure - uses: ./.github/workflows/deploy-data-migration-project.yaml - with: - name: "data-migration" - build_type: "service" - python_version: ${{ needs.metadata.outputs.python_version }} - commit_hash: ${{ needs.metadata.outputs.commit_hash }} - environment: ${{ needs.metadata.outputs.environment }} - repo_name: ${{ needs.metadata.outputs.reponame }} - workspace: ${{ 
needs.metadata.outputs.workspace }} - application_tag: ${{ inputs.application_tag || 'latest' }} - type: "app" - secrets: inherit - - service-automation-tests: - name: "Run service automation tests on ${{ needs.metadata.outputs.environment }}" - needs: - - metadata - - deploy-application-infrastructure - - restore-dynamodb-from-s3 - - export-dynamodb-to-s3 - if: | - always() && - !cancelled() && - ( - needs.restore-dynamodb-from-s3.result == 'success' || - ( - needs.restore-dynamodb-from-s3.result == 'skipped' && - (needs.export-dynamodb-to-s3.result == 'success' || needs.deploy-application-infrastructure.result == 'success') - ) - ) - uses: ./.github/workflows/service-automation-test.yaml - with: - environment: ${{ needs.metadata.outputs.environment }} - workspace: ${{ needs.metadata.outputs.workspace }} - commit_hash: ${{ needs.metadata.outputs.commit_hash }} - tag: ${{ inputs.tag }} - test_tag: "ftrs-pipeline" - test_type: "api" - type: app - secrets: inherit - - generate-prerelease: - name: "Generate prerelease tag" - needs: - - deploy-frontend-services - - service-automation-tests - if: > - always() && - github.event_name == 'push' && - github.ref == 'refs/heads/main' && - needs.deploy-frontend-services.result == 'success' && - needs.service-automation-tests.result == 'success' - uses: ./.github/workflows/generate-prerelease.yaml - secrets: inherit + MGMT_ACCOUNT_ID: ${{ secrets.MGMT_ACCOUNT_ID }} + AWS_ACCOUNT_ID_DEV: ${{ secrets.AWS_ACCOUNT_ID_DEV }} check-pipeline-status: name: "Check Pipeline Status" - needs: - - deploy-frontend-services - - deploy-data-migration-service - - service-automation-tests - - export-dynamodb-to-s3 + # needs: + # - deploy-data-migration-service if: always() uses: ./.github/workflows/pipeline-status-check.yaml - - slack-notifications: - name: "Send Notification to Slack" - needs: - - metadata - - quality-checks - - build-services - - deploy-application-infrastructure - # - migrate-data - - export-dynamodb-to-s3 - - restore-dynamodb-from-s3 - - deploy-frontend-services - - deploy-data-migration-service - - service-automation-tests - - generate-prerelease - - check-pipeline-status - if: always() - uses: ./.github/workflows/slack-notifications.yaml - with: - env: ${{ needs.metadata.outputs.environment }} - secrets: inherit diff --git a/.github/workflows/pipeline-deploy-policies.yaml b/.github/workflows/pipeline-deploy-policies.yaml index e383d5f0..5f681dd8 100644 --- a/.github/workflows/pipeline-deploy-policies.yaml +++ b/.github/workflows/pipeline-deploy-policies.yaml @@ -9,9 +9,10 @@ on: branches: - "main" - "task/**" + - "develop" paths: - "infrastructure/stacks/github_runner/**" - #- "infrastructure/stacks/account_policies/**" + - "infrastructure/stacks/account_policies/**" workflow_dispatch: # checkov:skip=CKV_GHA_7:Inputs reviewed and approved inputs: @@ -53,7 +54,7 @@ jobs: tag: ${{ inputs.tag }} environment: ${{ needs.metadata.outputs.environment }} workspace: "default" - stacks: "['github_runner']" + stacks: "['github_runner', 'account_policies']" type: account build_timestamp: ${{ needs.metadata.outputs.build_timestamp }} secrets: inherit @@ -72,10 +73,10 @@ jobs: include: - name: "account" environment: ${{ needs.metadata.outputs.account_type }} - stacks: "['github_runner']" + stacks: "['github_runner', 'account_policies']" - name: "mgmt" environment: ${{ needs.metadata.outputs.mgmt_environment }} - stacks: "['github_runner']" + stacks: "['github_runner','account_policies']" uses: ./.github/workflows/deploy-infrastructure.yaml with: environment: 
${{ matrix.environment }} @@ -103,7 +104,7 @@ jobs: concurrency: group: "${{ matrix.environment }}-default-${{ matrix.name }}-permissions-${{matrix.stacks}}" cancel-in-progress: false - if: github.ref == 'refs/heads/main' + if: ${{ github.ref == 'refs/heads/main' }} needs: - metadata - manual-approval-permissions @@ -113,7 +114,10 @@ jobs: include: - name: "account" environment: ${{ needs.metadata.outputs.account_type }} - stacks: "['github_runner']" + stacks: "['github_runner', 'account_policies']" + - name: "mgmt" + environment: ${{ needs.metadata.outputs.mgmt_environment }} + stacks: "['github_runner','account_policies']" uses: ./.github/workflows/deploy-infrastructure.yaml with: environment: ${{ matrix.environment }} diff --git a/.github/workflows/pipeline-infrastructure-cleardown.yaml b/.github/workflows/pipeline-infrastructure-cleardown.yaml new file mode 100644 index 00000000..9ff7f227 --- /dev/null +++ b/.github/workflows/pipeline-infrastructure-cleardown.yaml @@ -0,0 +1,69 @@ +name: Cleardown Application Infrastructure Pipeline +# Intended to run if +# actor is not the queue bot and +# the branch deleted is either +# a task branch or +# a dependabot branch +permissions: + id-token: write + contents: read + +on: + delete: + repository_dispatch: + types: [release-build-cleardown] + workflow_dispatch: + # checkov:skip=CKV_GHA_7:Inputs reviewed and approved + inputs: + application_tag: + description: "Specify the application tag to cleardown" + required: true + default: "latest" + type: string + environment: + description: "Specify the environment to cleardown" + required: true + default: "dev" + type: choice + options: + - dev + - test + workspace: + description: "Specify the workspace to cleardown" + required: true + type: string + +jobs: + metadata: + if: github.actor != 'github-merge-queue[bot]' + name: "Get Metadata" + uses: ./.github/workflows/metadata.yaml + + cleardown-infrastructure: + name: "Cleardown Infrastructure" + needs: + - metadata + uses: ./.github/workflows/infrastructure-cleardown.yaml + with: + environment: ${{ github.event.client_payload.environment || inputs.environment || needs.metadata.outputs.environment }} + workspace: ${{ github.event.client_payload.workspace || inputs.workspace || needs.metadata.outputs.workspace }} + stacks: "['triage']" + application_tag: ${{ inputs.application_tag || github.event.client_payload.application_tag || 'latest' }} + commit_hash: ${{ needs.metadata.outputs.commit_hash }} + workflow_timeout: 30 + type: app + secrets: inherit + + cleardown-artefacts: + if: github.actor != 'github-actions[bot]' + name: "Cleardown Artefacts" + needs: + - metadata + - cleardown-infrastructure + uses: ./.github/workflows/artefacts-cleardown.yaml + with: + environment: ${{ inputs.environment || needs.metadata.outputs.environment }} + workspace: ${{ inputs.workspace || needs.metadata.outputs.workspace }} + artefact_bucket_name: "${{ needs.metadata.outputs.reponame }}-${{ needs.metadata.outputs.mgmt_environment || inputs.environment }}-artefacts-bucket" + type: app + secrets: inherit diff --git a/.github/workflows/pipeline-status-check.yaml b/.github/workflows/pipeline-status-check.yaml new file mode 100644 index 00000000..d5d5c5bd --- /dev/null +++ b/.github/workflows/pipeline-status-check.yaml @@ -0,0 +1,32 @@ +name: Pipeline outcome check + +permissions: {} +on: + workflow_call: + + +jobs: + check-pipeline-status: + name: "Check Pipeline Status" + runs-on: ubuntu-latest + steps: + - uses: martialonline/workflow-status@v4 + id: check + + - name:
"Debug Info" + run: | + echo "Job Status: ${{ job.status }}" + echo "GitHub Event: ${{ github.event_name }}" + echo "Workflow Run: ${{ toJson(github.event.workflow_run) }}" + echo "Workflow Status: ${{ github.event.workflow_run.conclusion }}" + echo "GitHub Actor: ${{ github.actor }}" + echo "Pull Request URL: ${{ github.event.pull_request.html_url || github.event.head_commit.url }}" + echo "Status: ${{ steps.check.outputs.status }}" + + - name: "Fail if workflow not completed successfully" + if: ${{ steps.check.outputs.status != 'success' }} + run: | + echo "Workflow not completed successfully." + echo "Run ID: ${{ github.run_id }}" + exit 1 + diff --git a/.github/workflows/quality-checks.yaml b/.github/workflows/quality-checks.yaml index ff573363..54e4b329 100644 --- a/.github/workflows/quality-checks.yaml +++ b/.github/workflows/quality-checks.yaml @@ -40,6 +40,16 @@ on: description: "The build timestamp" required: false type: string + secrets: + ACCOUNT_ID: + description: "AWS account ID for credentials" + required: true + MGMT_ACCOUNT_ID: + description: "Management AWS account ID for credentials" + required: true + AWS_ACCOUNT_ID_DEV: + description: "AWS dev account ID" + required: true jobs: scan-secrets: @@ -163,7 +173,9 @@ jobs: workspace: ${{ inputs.workspace }} stack: ${{ matrix.stack }} action: validate + project: saet mgmt_account_id: ${{ secrets.MGMT_ACCOUNT_ID }} + account_id_dev: ${{ secrets.AWS_ACCOUNT_ID_DEV }} check-terraform-format: name: "Check Terraform format" diff --git a/.github/workflows/static-code-analysis.yaml b/.github/workflows/static-code-analysis.yaml new file mode 100644 index 00000000..b56a2172 --- /dev/null +++ b/.github/workflows/static-code-analysis.yaml @@ -0,0 +1,34 @@ +name: "Perform SonarCloud static analysis" +permissions: + id-token: write + contents: read +on: + workflow_call: + inputs: + environment: + description: "The deployment environment" + required: true + type: string + tag: + description: "The git tag to checkout or, if not passed in, the current branch" + required: false + type: string + +jobs: + static-analysis: + name: "Perform SonarCloud static analysis" + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + ref: ${{ inputs.tag }} + + - name: "Perform static analysis" + uses: ./.github/actions/perform-static-analysis + with: + sonar_organisation_key: ${{ vars.SONAR_ORGANISATION_KEY }} + sonar_project_key: ${{ vars.SONAR_PROJECT_KEY }} + sonar_token: ${{ secrets.SONAR_TOKEN }} diff --git a/.github/workflows/test-workflow.yaml b/.github/workflows/test-workflow.yaml deleted file mode 100644 index bf64bbb0..00000000 --- a/.github/workflows/test-workflow.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: test-workflow - -on: - workflow_dispatch: - -env: - AWS_REGION: "eu-west-2" - -permissions: - contents: read - id-token: write - -jobs: - saet-test-checks: - name: "test-workflow" - runs-on: ubuntu-latest - - steps: - - name: Git clone the repository - uses: actions/checkout@v6.0.1 - - - name: configure aws credentials - uses: aws-actions/configure-aws-credentials@v6.0.0 - with: - role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/saet-triage-api-dev-account-github-runner - role-session-name: GitHub_to_AWS_via_FederatedOIDC - aws-region: ${{ env.AWS_REGION }} - - - name: Create S3 bucket - run: | - aws s3api create-bucket --bucket saet-test-bucket-12345 --create-bucket-configuration LocationConstraint=eu-west-2 diff --git a/.gitignore b/.gitignore 
index 1f09ff50..1531a8b8 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,9 @@ tests/integration/release/*.zip package jmeter.log tmp-postman-env.json + +# Terraform +*.tfstate +*.tfstate.* +**/.terraform/* +**/.terraform.lock.hcl diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 120000 index 00000000..4932d523 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1 @@ +scripts/config/pre-commit.yaml \ No newline at end of file diff --git a/.tool-versions b/.tool-versions index b6d368b1..bf46170d 100644 --- a/.tool-versions +++ b/.tool-versions @@ -3,6 +3,7 @@ terraform 1.7.0 pre-commit 3.6.0 gitleaks 8.18.4 +vale 3.6.0 # ============================================================================== # The section below is reserved for Docker image versions. diff --git a/Makefile b/Makefile index bb631eeb..03e44749 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,11 @@ build: # Build the project artefact @Pipeline popd publish: # Publish the project artefact @Pipeline - # TODO: Implement the artefact publishing step + @echo "Publishing lambda_function.zip to S3 artifact bucket..." + @BUCKET_NAME="$${REPO_NAME}-$${ENVIRONMENT}-triage-artifact"; \ + echo "Target S3 bucket: $$BUCKET_NAME"; \ + aws s3 cp src/lambda_function.zip s3://$$BUCKET_NAME/lambda_function.zip + @echo "Successfully published lambda_function.zip" deploy-local: build # Deploy the project artefact to the target environment @Pipeline echo "Deploying to localstack" diff --git a/infrastructure/.gitignore b/infrastructure/.gitignore index 22ebdac3..7a79078c 100644 --- a/infrastructure/.gitignore +++ b/infrastructure/.gitignore @@ -15,7 +15,6 @@ crash.*.log # password, private keys, and other secrets. These should not be part of version # control as they are data points which are potentially sensitive and subject # to change depending on the environment. 
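The new Makefile `publish` target above resolves its bucket from `REPO_NAME` and `ENVIRONMENT` at run time. An illustrative invocation (the variable values here are examples, not the project's real names):

```bash
# Example invocation of the new Makefile publish target; values are illustrative.
REPO_NAME=saet-triage-api ENVIRONMENT=dev make publish
# Copies src/lambda_function.zip to:
#   s3://saet-triage-api-dev-triage-artifact/lambda_function.zip
```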
-*.tfvars *.tfvars.json # Ignore override files as they are usually used to override resources locally and so diff --git a/infrastructure/account_wide.tfvars b/infrastructure/account_wide.tfvars new file mode 100644 index 00000000..fcf22227 --- /dev/null +++ b/infrastructure/account_wide.tfvars @@ -0,0 +1,11 @@ +private_dedicated_network_acl = true +public_dedicated_network_acl = true + +vpc_flow_logs_bucket_name = "vpc-flow-logs" +subnet_flow_logs_bucket_name = "subnet-flow-logs" +flow_log_destination_type = "s3" +flow_log_file_format = "parquet" +flow_log_s3_versioning = false +flow_logs_s3_expiration_days = 10 + +enable_s3_kms_encryption = true diff --git a/infrastructure/common.tfvars b/infrastructure/common.tfvars new file mode 100644 index 00000000..53a67eea --- /dev/null +++ b/infrastructure/common.tfvars @@ -0,0 +1,14 @@ +project = "saet" +project_owner = "nhs-uec" +# service = "uec-saet" +# cost_centre = "P0675" +# data_type = "PCD" +# project_type = "Pilot" +#public_facing = "no" +# service_category = "bronze" +team_owner = "saet" + +artefacts_bucket_name = "artefacts-bucket" +s3_logging_bucket_name = "s3-access-logs" + +# rds_port = 5432 diff --git a/infrastructure/common/common-variables.tf b/infrastructure/common/common-variables.tf index d6ff628f..153b1d7a 100644 --- a/infrastructure/common/common-variables.tf +++ b/infrastructure/common/common-variables.tf @@ -7,11 +7,6 @@ variable "project_owner" { description = "The owner of the project, based on organisation and department codes" } -variable "project_name" { - description = "Project name" - type = string -} - variable "aws_region" { type = string default = "eu-west-2" @@ -76,6 +71,11 @@ variable "app_github_runner_role_name" { default = "app-github-runner" } +variable "artefacts_bucket_name" { + description = "Artefacts S3 bucket name" + type = string +} + variable "stack_name" { description = "The hyphenated version of the stack name used in names of resources defined in that stack" type = string diff --git a/infrastructure/common/locals.tf b/infrastructure/common/locals.tf index 53d683bc..e5a9cd2a 100644 --- a/infrastructure/common/locals.tf +++ b/infrastructure/common/locals.tf @@ -1,6 +1,7 @@ locals { account_id = data.aws_caller_identity.current.id workspace_suffix = "${terraform.workspace}" == "default" ? 
"" : "-${terraform.workspace}" + artefacts_bucket = "${var.repo_name}-mgmt-${var.artefacts_bucket_name}" project_prefix = "${var.project}-${var.environment}" resource_prefix = "${local.project_prefix}-${var.stack_name}" account_prefix = "${var.repo_name}-${var.environment}" @@ -27,3 +28,4 @@ locals { opensearch = "alias/${local.project_prefix}-opensearch-kms" } } + diff --git a/infrastructure/common/provider.tf b/infrastructure/common/provider.tf index cd0b0fde..4ab004e4 100644 --- a/infrastructure/common/provider.tf +++ b/infrastructure/common/provider.tf @@ -3,7 +3,7 @@ provider "aws" { default_tags { tags = { Environment = var.environment - Project = var.project_name + Project = var.project ManagedBy = "Terraform" } } diff --git a/infrastructure/environments/dev/account_security.tfvars b/infrastructure/environments/dev/account_security.tfvars new file mode 100644 index 00000000..cd2da3ef --- /dev/null +++ b/infrastructure/environments/dev/account_security.tfvars @@ -0,0 +1 @@ +enable_iam_analyzer = true diff --git a/infrastructure/environments/dev/account_wide.tfvars b/infrastructure/environments/dev/account_wide.tfvars new file mode 100644 index 00000000..d69d5cde --- /dev/null +++ b/infrastructure/environments/dev/account_wide.tfvars @@ -0,0 +1,20 @@ +vpc = { + name = "vpc" + cidr = "10.170.0.0/16" + + public_subnet_a = "10.170.0.0/21" + public_subnet_b = "10.170.8.0/21" + public_subnet_c = "10.170.16.0/21" + + private_subnet_a = "10.170.24.0/21" + private_subnet_b = "10.170.32.0/21" + private_subnet_c = "10.170.40.0/21" +} + +enable_flow_log = false +flow_log_s3_force_destroy = true + +# Single NAT Gateway +enable_nat_gateway = true +single_nat_gateway = true +one_nat_gateway_per_az = false diff --git a/infrastructure/environments/dev/environment.tfvars b/infrastructure/environments/dev/environment.tfvars new file mode 100644 index 00000000..60d846d6 --- /dev/null +++ b/infrastructure/environments/dev/environment.tfvars @@ -0,0 +1,3 @@ +# environment specific values that are applicable to more than one stack +environment = "dev" +# data_classification = "3" diff --git a/infrastructure/environments/dev/triage.tfvars b/infrastructure/environments/dev/triage.tfvars new file mode 100644 index 00000000..e69de29b diff --git a/infrastructure/environments/mgmt/account_security.tfvars b/infrastructure/environments/mgmt/account_security.tfvars new file mode 100644 index 00000000..cd2da3ef --- /dev/null +++ b/infrastructure/environments/mgmt/account_security.tfvars @@ -0,0 +1 @@ +enable_iam_analyzer = true diff --git a/infrastructure/environments/mgmt/account_wide.tfvars b/infrastructure/environments/mgmt/account_wide.tfvars new file mode 100644 index 00000000..d69d5cde --- /dev/null +++ b/infrastructure/environments/mgmt/account_wide.tfvars @@ -0,0 +1,20 @@ +vpc = { + name = "vpc" + cidr = "10.170.0.0/16" + + public_subnet_a = "10.170.0.0/21" + public_subnet_b = "10.170.8.0/21" + public_subnet_c = "10.170.16.0/21" + + private_subnet_a = "10.170.24.0/21" + private_subnet_b = "10.170.32.0/21" + private_subnet_c = "10.170.40.0/21" +} + +enable_flow_log = false +flow_log_s3_force_destroy = true + +# Single NAT Gateway +enable_nat_gateway = true +single_nat_gateway = true +one_nat_gateway_per_az = false diff --git a/infrastructure/environments/mgmt/environment.tfvars b/infrastructure/environments/mgmt/environment.tfvars new file mode 100644 index 00000000..50632e09 --- /dev/null +++ b/infrastructure/environments/mgmt/environment.tfvars @@ -0,0 +1,3 @@ +# environment specific values that are 
applicable to more than one stack +environment = "mgmt" +# data_classification = "3" diff --git a/infrastructure/environments/prod/environment.tfvars b/infrastructure/environments/prod/environment.tfvars new file mode 100644 index 00000000..e69de29b diff --git a/infrastructure/environments/test/environment.tfvars b/infrastructure/environments/test/environment.tfvars new file mode 100644 index 00000000..28c6f84d --- /dev/null +++ b/infrastructure/environments/test/environment.tfvars @@ -0,0 +1,11 @@ +aws_region = "eu-west-2" +environment = "test" +project_name = "saet" + +#Lambda +mem_size = 512 +runtime = "python3.13" +s3_key = "lambda_function.zip" + +#Rest API +stage_name = "beta" diff --git a/infrastructure/github_runner.tfvars b/infrastructure/github_runner.tfvars new file mode 100644 index 00000000..31a8fa56 --- /dev/null +++ b/infrastructure/github_runner.tfvars @@ -0,0 +1 @@ +github_org = "NHSDigital" diff --git a/infrastructure/local/versions.tf b/infrastructure/local/versions.tf index 06feaff2..292c49c0 100644 --- a/infrastructure/local/versions.tf +++ b/infrastructure/local/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "6.5.0" + version = "6.31.0" } } } diff --git a/infrastructure/modules/dynamodb/locals.tf b/infrastructure/modules/dynamodb/locals.tf new file mode 100644 index 00000000..350cb687 --- /dev/null +++ b/infrastructure/modules/dynamodb/locals.tf @@ -0,0 +1,6 @@ +# ============================================================================== +# Context + +locals { + workspace_suffix = "${terraform.workspace}" == "default" ? "" : "-${terraform.workspace}" +} diff --git a/infrastructure/modules/dynamodb/main.tf b/infrastructure/modules/dynamodb/main.tf index 10afc85d..8aada51c 100755 --- a/infrastructure/modules/dynamodb/main.tf +++ b/infrastructure/modules/dynamodb/main.tf @@ -1,27 +1,8 @@ -# resource "aws_dynamodb_table" "table" { -# name = var.table_name -# billing_mode = var.billing -# hash_key = var.hash_key -# range_key = var.range_key - -# attribute { -# name = var.hash_key -# type = var.hash_key_type -# } - -# dynamic "attribute" { -# for_each = var.range_key != null ? [var.range_key] : [] -# content { -# name = var.range_key -# type = var.hash_key_type -# } -# } -# } module "dynamodb_table" { - # Module version: 4.3.0 - source = "git::https://github.com/terraform-aws-modules/terraform-aws-dynamodb-table.git?ref=1ab93ca82023b72fe37de7f17cc10714867b2d4f" + # Module version: 5.5.0 + source = "git::https://github.com/terraform-aws-modules/terraform-aws-dynamodb-table.git?ref=45c9cb10c2f6209e7362bba92cadd5ab3c9e2003" - name = "${var.table_name}" + name = "${var.table_name}${local.workspace_suffix}" hash_key = var.hash_key range_key = var.range_key autoscaling_enabled = var.autoscaling_enabled diff --git a/infrastructure/modules/dynamodb/variables.tf b/infrastructure/modules/dynamodb/variables.tf index ee4462bb..35ac016e 100755 --- a/infrastructure/modules/dynamodb/variables.tf +++ b/infrastructure/modules/dynamodb/variables.tf @@ -74,7 +74,7 @@ variable "stream_enabled" { variable "stream_view_type" { description = "Determines the information written to the stream when an item is modified." 
type = string - default = "NEW_AND_OLD_IMAGES" + default = null } variable "attributes" { diff --git a/infrastructure/modules/kms/main.tf b/infrastructure/modules/kms/main.tf new file mode 100644 index 00000000..8e42220e --- /dev/null +++ b/infrastructure/modules/kms/main.tf @@ -0,0 +1,46 @@ +resource "aws_kms_key" "encryption_key" { + description = var.description + enable_key_rotation = var.enable_key_rotation + rotation_period_in_days = var.kms_rotation_period_in_days + policy = jsonencode({ + Version = "2012-10-17" + Statement = concat([ + { + "Sid" : "SetAccountRootPermissions", + "Effect" : "Allow", + "Principal" : { + "AWS" : "arn:aws:iam::${var.account_id}:root" + }, + "Action" : "kms:*", + "Resource" : "*" + }, + { + "Sid" : "AllowInAccountUseOfKmsKey", + "Effect" : "Allow", + "Principal" : { + "Service" : "${var.aws_service_name}" + }, + "Action" : [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey", + "kms:ListAliases", + "kms:ListKeys" + ], + "Resource" : "*", + "Condition" : { + "StringEquals" : { + "aws:SourceAccount" : var.account_id + } + } + } + ], var.additional_policy_statements) + }) +} + +resource "aws_kms_alias" "encryption_key_alias" { + name = var.alias_name + target_key_id = aws_kms_key.encryption_key.key_id +} diff --git a/infrastructure/modules/kms/outputs.tf b/infrastructure/modules/kms/outputs.tf new file mode 100644 index 00000000..e1ea9733 --- /dev/null +++ b/infrastructure/modules/kms/outputs.tf @@ -0,0 +1,7 @@ +output "key_id" { + value = aws_kms_key.encryption_key.key_id +} + +output "arn" { + value = aws_kms_key.encryption_key.arn +} diff --git a/infrastructure/modules/kms/variables.tf b/infrastructure/modules/kms/variables.tf new file mode 100644 index 00000000..b49fbff7 --- /dev/null +++ b/infrastructure/modules/kms/variables.tf @@ -0,0 +1,40 @@ +variable "description" { + description = "The description for the KMS key." + type = string + default = "KMS key managed by Terraform" +} + +variable "kms_rotation_period_in_days" { + description = "The number of days in the rotation period for the KMS key." + type = number + default = 365 +} + +variable "alias_name" { + description = "The alias name for the KMS key." + type = string + default = "" +} + +variable "account_id" { + description = "The AWS account ID where the KMS key will be created." + type = string + default = "" +} + +variable "aws_service_name" { + description = "The AWS service name that will be allowed to use the KMS key." + type = string +} + +variable "enable_key_rotation" { + description = "Whether to enable key rotation for the KMS key." 
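+  # Note: kms_rotation_period_in_days only takes effect when rotation is enabled here.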
+ type = bool + default = true +} + +variable "additional_policy_statements" { + type = list(any) + default = [] + description = "Additional statements to add to the KMS key policy" +} diff --git a/infrastructure/modules/lambda/data.tf b/infrastructure/modules/lambda/data.tf new file mode 100644 index 00000000..4a8d12bc --- /dev/null +++ b/infrastructure/modules/lambda/data.tf @@ -0,0 +1,99 @@ +data "aws_iam_policy_document" "vpc_access_policy" { + # checkov:skip=CKV_AWS_111: TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-421 + # checkov:skip=CKV_AWS_356: TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-421 + statement { + sid = "AllowVpcAccess" + effect = "Allow" + actions = [ + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:AssignPrivateIpAddresses", + "ec2:UnassignPrivateIpAddresses" + ] + resources = [ + "*" + ] + } +} + +data "aws_iam_policy_document" "deny_lambda_function_access_policy" { + # checkov:skip=CKV_AWS_111: TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-421 + statement { + sid = "DenyLambdaFunctionAccess" + effect = "Deny" + actions = [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSubnets", + "ec2:DetachNetworkInterface", + "ec2:AssignPrivateIpAddresses", + "ec2:UnassignPrivateIpAddresses" + ] + resources = ["*"] + condition { + test = "ArnEquals" + variable = "lambda:SourceFunctionArn" + values = [ + "arn:aws:lambda:${var.aws_region}:${var.account_id}:function:${var.function_name}${local.workspace_suffix}" + ] + } + } +} + +data "aws_iam_policy_document" "allow_private_subnet_policy" { + # checkov:skip=CKV_AWS_111: TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-421 + statement { + sid = "AllowPrivateSubnetAccess" + effect = "Allow" + actions = [ + "lambda:CreateFunction", + "lambda:UpdateFunctionConfiguration" + ] + resources = ["*"] + condition { + test = "ForAllValues:StringEquals" + variable = "lambda:SubnetIds" + values = var.subnet_ids + } + } +} + +data "aws_iam_policy_document" "limit_to_environment_vpc_policy" { + # checkov:skip=CKV_AWS_111: TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-421 + + statement { + sid = "EnforceStayInSpecificVpc" + effect = "Allow" + actions = [ + "lambda:CreateFunction", + "lambda:UpdateFunctionConfiguration" + ] + resources = ["*"] + condition { + test = "StringEquals" + variable = "lambda:VpcIds" + values = [var.vpc_id] + } + } +} + +data "aws_iam_policy_document" "enforce_vpc_lambda_policy" { + # checkov:skip=CKV_AWS_111: TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-421 + statement { + sid = "EnforceVpcFunction" + effect = "Deny" + actions = [ + "lambda:CreateFunction", + "lambda:UpdateFunctionConfiguration" + ] + resources = ["*"] + condition { + test = "Null" + variable = "lambda:VpcIds" + values = ["true"] + } + } +} diff --git a/infrastructure/modules/lambda/locals.tf b/infrastructure/modules/lambda/locals.tf new file mode 100644 index 00000000..93069cfc --- /dev/null +++ b/infrastructure/modules/lambda/locals.tf @@ -0,0 +1,15 @@ + +# ============================================================================== +# Context + +locals { + workspace_suffix = "${terraform.workspace}" == "default" ? "" : "-${terraform.workspace}" + environment_workspace = "${terraform.workspace}" == "default" ? 
"" : "${terraform.workspace}" + additional_json_policies = (concat(var.policy_jsons, [ + data.aws_iam_policy_document.allow_private_subnet_policy.json, + data.aws_iam_policy_document.limit_to_environment_vpc_policy.json, + data.aws_iam_policy_document.enforce_vpc_lambda_policy.json, + data.aws_iam_policy_document.deny_lambda_function_access_policy.json, + data.aws_iam_policy_document.vpc_access_policy.json + ])) +} diff --git a/infrastructure/modules/lambda/main.tf b/infrastructure/modules/lambda/main.tf new file mode 100644 index 00000000..bcc2f640 --- /dev/null +++ b/infrastructure/modules/lambda/main.tf @@ -0,0 +1,37 @@ +#trivy:ignore:AVD-AWS-0066 +module "lambda" { + # Module version: 8.1.0 + source = "git::https://github.com/terraform-aws-modules/terraform-aws-lambda.git?ref=1c3b16a8d9ee8944ba33f5327bdf011c6639cceb" + + function_name = "${var.function_name}${local.workspace_suffix}" + handler = var.handler + runtime = var.runtime + publish = var.publish + attach_policy_jsons = var.attach_policy_jsons + number_of_policy_jsons = length(local.additional_json_policies) + attach_tracing_policy = var.attach_tracing_policy + tracing_mode = var.tracing_mode + description = var.description + policy_jsons = local.additional_json_policies + timeout = var.timeout + memory_size = var.memory_size + + create_package = var.create_package + local_existing_package = var.create_package ? var.local_existing_package : null + ignore_source_code_hash = var.ignore_source_code_hash + allowed_triggers = var.allowed_triggers + + s3_existing_package = var.create_package ? null : { + bucket = var.s3_bucket_name + key = var.s3_key + } + + vpc_subnet_ids = var.subnet_ids + vpc_security_group_ids = var.security_group_ids + + environment_variables = merge(var.environment_variables, { WORKSPACE = "${local.environment_workspace}" }) + layers = var.layers + + cloudwatch_logs_retention_in_days = var.cloudwatch_logs_retention + logging_system_log_level = var.cloudwatch_log_level +} diff --git a/infrastructure/modules/lambda/outputs.tf b/infrastructure/modules/lambda/outputs.tf new file mode 100644 index 00000000..d94ba89e --- /dev/null +++ b/infrastructure/modules/lambda/outputs.tf @@ -0,0 +1,34 @@ + +output "lambda_name" { + value = module.lambda.lambda_function_name +} + +output "lambda_arn" { + value = module.lambda.lambda_function_arn +} + +output "lambda_function_arn" { + value = module.lambda.lambda_function_arn +} + +output "lambda_function_invoke_arn" { + description = "The Invoke ARN of the Lambda Function" + value = module.lambda.lambda_function_invoke_arn +} + +output "lambda_function_name" { + value = module.lambda.lambda_function_name +} + +output "lambda_role_arn" { + value = module.lambda.lambda_role_arn +} + +output "lambda_cloudwatch_log_group_name" { + value = module.lambda.lambda_cloudwatch_log_group_name +} + +output "lambda_function_version" { + description = "Latest published version of Lambda Function" + value = module.lambda.lambda_function_version +} diff --git a/infrastructure/modules/lambda/variables.tf b/infrastructure/modules/lambda/variables.tf new file mode 100644 index 00000000..a8f602b4 --- /dev/null +++ b/infrastructure/modules/lambda/variables.tf @@ -0,0 +1,148 @@ +# ============================================================================== +# Mandatory variables + +variable "function_name" { + description = "The function name of the Lambda" +} + +variable "description" { + description = "The description of the Lambda" +} + +variable "policy_jsons" { + description = "List of JSON 
policies for Lambda" + default = [ + + ] +} + +# ============================================================================== +# Default variables +variable "handler" { + description = "Handler function entry point" + default = "app.lambda_handler" +} + +variable "runtime" { + description = "Runtime environment for the Lambda function" + default = "python3.12" +} + +variable "publish" { + description = "Whether to publish a new Lambda version on update" + default = true +} + +variable "create_package" { + description = "Whether to create a new ZIP package or use an existing one" + default = false +} + +variable "local_existing_package" { + description = "Path to the local ZIP file if using a pre-existing package" + default = "./misc/init.zip" +} + +variable "ignore_source_code_hash" { + description = "Whether to ignore changes to the source code hash" + default = true +} + +variable "attach_tracing_policy" { + default = false +} + +variable "tracing_mode" { + description = "Tracing configuration for the Lambda function" + type = string + default = "PassThrough" +} + +variable "attach_policy_jsons" { + description = "Whether to attach the provided JSON policies to the Lambda role" + default = true +} + +variable "number_of_policy_jsons" { + description = "Number of JSON policies to attach" + default = "1" +} + +variable "environment_variables" { + description = "Map of environment variables" + default = {} +} + +variable "layers" { + description = "The name of the Lambda layers" + default = [] +} + +variable "memory_size" { + description = "Amount of memory in MB your Lambda Function can use at runtime" + default = "512" +} + +variable "timeout" { + description = "Timeout of the lambda function in seconds" + default = "3" +} + +variable "subnet_ids" { + description = "List of subnet IDs for the Lambda function VPC configuration" + type = list(string) + default = null +} + +variable "security_group_ids" { + description = "List of security group IDs for the Lambda function VPC configuration" + type = list(string) + default = null +} + +variable "s3_bucket_name" { + description = "Name of the S3 bucket where the Lambda package is stored" +} + +variable "s3_key" { + description = "S3 key (path) to the Lambda package inside the S3 bucket" +} + +variable "allowed_triggers" { + description = "List of allowed triggers for the Lambda function" + type = map(any) + default = {} +} + +# variable "account_prefix" { +# description = "Prefix for the account resources, typically includes the repo name and environment" +# type = string +# } + +variable "account_id" { + description = "AWS account ID" + type = string +} + +variable "aws_region" { + description = "AWS region where the Lambda function will be deployed" + type = string +} + +variable "vpc_id" { + description = "Id of the VPC into which the Lambda function will be deployed" + type = string + default = null +} + +variable "cloudwatch_logs_retention" { + description = "Number of days to retain CloudWatch logs" + type = number + default = 30 +} + +variable "cloudwatch_log_level" { + description = "Logging level for CloudWatch logs" + type = string + default = "INFO" +} diff --git a/infrastructure/modules/s3/main.tf b/infrastructure/modules/s3/main.tf index e5c27706..b67b9041 100755 --- a/infrastructure/modules/s3/main.tf +++ b/infrastructure/modules/s3/main.tf @@ -18,7 +18,7 @@ module "s3" { # Module version: 5.7.0 source = "git::https://github.com/terraform-aws-modules/terraform-aws-s3-bucket.git?ref=c375418373496865e2770ad8aabfaf849d4caee5" - 
bucket = "${var.bucket_name}" + bucket = var.bucket_name attach_policy = var.attach_policy policy = var.policy lifecycle_rule = var.lifecycle_rule_inputs @@ -29,7 +29,14 @@ module "s3" { ignore_public_acls = true restrict_public_buckets = true - server_side_encryption_configuration = { + server_side_encryption_configuration = var.enable_kms_encryption ? { + rule = { + apply_server_side_encryption_by_default = { + sse_algorithm = "aws:kms" + kms_master_key_id = var.s3_encryption_key_arn # gitleaks:allow + } + } + } : { rule = { apply_server_side_encryption_by_default = { sse_algorithm = "AES256" @@ -48,3 +55,24 @@ module "s3" { website = var.website_map } + +resource "aws_s3_bucket_policy" "enforce_kms_truststore" { + count = var.enable_kms_encryption ? 1 : 0 + bucket = module.s3.s3_bucket_id + policy = jsonencode({ Version = "2012-10-17" + Statement = [ + { + Sid = "DenyUnencryptedUploads" + Effect = "Deny" + Principal = "*" + Action = "s3:PutObject" + Resource = "${module.s3.s3_bucket_arn}/*", + Condition = { + StringNotEquals = { + "s3:x-amz-server-side-encryption" : var.s3_encryption_key_arn + } + } + } + ] + }) +} diff --git a/infrastructure/modules/s3/variables.tf b/infrastructure/modules/s3/variables.tf index 8c9e65c9..a5fa0f58 100755 --- a/infrastructure/modules/s3/variables.tf +++ b/infrastructure/modules/s3/variables.tf @@ -70,3 +70,15 @@ variable "s3_logging_bucket" { type = string default = "" } + +variable "s3_encryption_key_arn" { + description = "The ARN of the KMS key to use for server-side encryption if required" + type = string + default = null +} + +variable "enable_kms_encryption" { + description = "Whether to enable server-side KMS encryption for the S3 bucket" + type = bool + default = false +} diff --git a/infrastructure/modules/shield/terraform.tf b/infrastructure/modules/shield/terraform.tf index e895794a..175ffbfc 100644 --- a/infrastructure/modules/shield/terraform.tf +++ b/infrastructure/modules/shield/terraform.tf @@ -2,7 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "6.5.0" + version = "6.31.0" } } } diff --git a/infrastructure/remote/versions.tf b/infrastructure/remote/versions.tf index 53ce97ec..5dcccbf9 100644 --- a/infrastructure/remote/versions.tf +++ b/infrastructure/remote/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "6.5.0" + version = "6.31.0" } } } diff --git a/infrastructure/stacks/account_policies/api_gateway_account.tf b/infrastructure/stacks/account_policies/api_gateway_account.tf new file mode 100644 index 00000000..d8d59103 --- /dev/null +++ b/infrastructure/stacks/account_policies/api_gateway_account.tf @@ -0,0 +1,7 @@ +resource "aws_api_gateway_account" "api_gateway_account" { + cloudwatch_role_arn = aws_iam_role.cloudwatch_api_gateway_role.arn + + depends_on = [ + aws_iam_role_policy_attachment.api_gateway_cloudwatch_policy_attachment + ] +} diff --git a/infrastructure/stacks/account_policies/iam_policies.tf b/infrastructure/stacks/account_policies/iam_policies.tf new file mode 100644 index 00000000..ccc10512 --- /dev/null +++ b/infrastructure/stacks/account_policies/iam_policies.tf @@ -0,0 +1,128 @@ +locals { + billing_ro_policy = jsondecode(file("${path.module}/policies/ro_billing.json")) + compute_rw_policy = jsondecode(file("${path.module}/policies/rw_compute.json")) + compute_ro_policy = jsondecode(file("${path.module}/policies/ro_compute.json")) + data_rw_policy = jsondecode(file("${path.module}/policies/rw_data.json")) + 
networking_rw_policy = jsondecode(file("${path.module}/policies/rw_networking.json")) + networking_ro_policy = jsondecode(file("${path.module}/policies/ro_networking.json")) + security_rw_policy = jsondecode(file("${path.module}/policies/rw_security.json")) + security_ro_policy = jsondecode(file("${path.module}/policies/ro_security.json")) + monitoring_rw_policy = jsondecode(file("${path.module}/policies/rw_monitoring.json")) + monitoring_ro_policy = jsondecode(file("${path.module}/policies/ro_monitoring.json")) + management_rw_policy = jsondecode(file("${path.module}/policies/rw_management.json")) + management_ro_policy = jsondecode(file("${path.module}/policies/ro_management.json")) + infrastructure_security_rw_policy = jsondecode(file("${path.module}/policies/rw_infrastructure_security.json")) + infrastructure_security_ro_policy = jsondecode(file("${path.module}/policies/ro_infrastructure_security.json")) + infrastructure_management_rw_policy = jsondecode(file("${path.module}/policies/rw_infrastructure_management.json")) + infrastructure_management_ro_policy = jsondecode(file("${path.module}/policies/ro_infrastructure_management.json")) + infrastructure_resilience_rw_policy = jsondecode(file("${path.module}/policies/rw_infrastructure_resilience.json")) + data_ro_policy = jsondecode(file("${path.module}/policies/ro_data.json")) +} + +resource "aws_iam_policy" "billing_policy_ro" { + name = "ro_billing" + description = "Read-only policies for aws billing services" + policy = jsonencode(local.billing_ro_policy) +} + +resource "aws_iam_policy" "compute_policy_rw" { + name = "rw_compute" + description = "Read-write policies for aws compute-related services" + policy = jsonencode(local.compute_rw_policy) +} + +resource "aws_iam_policy" "compute_policy_ro" { + name = "ro_compute" + description = "Read-only policies for aws compute-related services" + policy = jsonencode(local.compute_ro_policy) +} + +resource "aws_iam_policy" "data_rw" { + name = "rw_data" + description = "Read-write policies for aws data services" + policy = jsonencode(local.data_rw_policy) +} + +resource "aws_iam_policy" "data_ro" { + name = "ro_data" + description = "Read-only policies for aws data services" + policy = jsonencode(local.data_ro_policy) +} + +resource "aws_iam_policy" "networking_rw" { + name = "rw_networking" + description = "Read-write policies for aws networking services" + policy = jsonencode(local.networking_rw_policy) +} + +resource "aws_iam_policy" "networking_ro" { + name = "ro_networking" + description = "Read-only policies for aws networking services" + policy = jsonencode(local.networking_ro_policy) +} + +resource "aws_iam_policy" "security_rw" { + name = "rw_security" + description = "Read-write policies for aws security services" + policy = jsonencode(local.security_rw_policy) +} + +resource "aws_iam_policy" "security_ro" { + name = "ro_security" + description = "Read-only policies for aws security services" + policy = jsonencode(local.security_ro_policy) +} + +resource "aws_iam_policy" "monitoring_rw" { + name = "rw_monitoring" + description = "Read-write policies for aws monitoring services" + policy = jsonencode(local.monitoring_rw_policy) +} + +resource "aws_iam_policy" "monitoring_ro" { + name = "ro_monitoring" + description = "Read-only policies for aws monitoring services" + policy = jsonencode(local.monitoring_ro_policy) +} + +resource "aws_iam_policy" "management_rw" { + name = "rw_management" + description = "Read-write policies for aws management services" + policy = 
jsonencode(local.management_rw_policy) +} + +resource "aws_iam_policy" "management_ro" { + name = "ro_management" + description = "Read-only policies for aws management services" + policy = jsonencode(local.management_ro_policy) +} + +resource "aws_iam_policy" "infrastructure_security_rw" { + name = "rw_infrastructure_security" + description = "Read-write policies for aws infrastructure security services" + policy = jsonencode(local.infrastructure_security_rw_policy) +} + +resource "aws_iam_policy" "infrastructure_security_ro" { + name = "ro_infrastructure_security" + description = "Read-only policies for aws infrastructure security services" + policy = jsonencode(local.infrastructure_security_ro_policy) +} + +resource "aws_iam_policy" "infrastructure_management_rw" { + name = "rw_infrastructure_management" + description = "Read-write policies for aws infrastructure management services" + policy = jsonencode(local.infrastructure_management_rw_policy) +} + +resource "aws_iam_policy" "infrastructure_management_ro" { + name = "ro_infrastructure_management" + description = "Read-only policies for aws infrastructure management services" + policy = jsonencode(local.infrastructure_management_ro_policy) +} + +resource "aws_iam_policy" "infrastructure_resilience_rw" { + name = "rw_infrastructure_resilience" + description = "Read-write policies for aws resilience hub services" + policy = jsonencode(local.infrastructure_resilience_rw_policy) +} diff --git a/infrastructure/stacks/account_policies/iam_roles.tf b/infrastructure/stacks/account_policies/iam_roles.tf new file mode 100644 index 00000000..5f1e5c0d --- /dev/null +++ b/infrastructure/stacks/account_policies/iam_roles.tf @@ -0,0 +1,53 @@ +# resource "aws_iam_role" "dms_vpc_role" { +# name = "dms-vpc-role" +# description = "Allows DMS to manage VPC" +# assume_role_policy = jsonencode({ +# Version = "2012-10-17" +# Statement = [ +# { +# Effect = "Allow" +# Principal = { +# Service = "dms.amazonaws.com" +# } +# Action = "sts:AssumeRole" +# }, +# ] +# }) +# } + +# resource "aws_iam_role_policy_attachment" "dms_vpc_role_policy_attachment" { +# role = aws_iam_role.dms_vpc_role.name +# policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole" +# } + +# Create the service-linked role for Shield Advanced +# This ensures the role exists before we try to use Shield Advanced features +resource "aws_iam_service_linked_role" "shield" { + aws_service_name = "shield.amazonaws.com" + description = "Service-linked role for AWS Shield Advanced" +} + +# Role and policy for allowing the REST variant of the API Gateway to write logs to specifically named log groups +# in the account +resource "aws_iam_role" "cloudwatch_api_gateway_role" { + name = "${var.project}-api-gateway-cloudwatch" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +data "aws_iam_policy_document" "assume_role" { + statement { + effect = "Allow" + + principals { + type = "Service" + identifiers = ["apigateway.amazonaws.com"] + } + + actions = ["sts:AssumeRole"] + } +} + +resource "aws_iam_role_policy_attachment" "api_gateway_cloudwatch_policy_attachment" { + role = aws_iam_role.cloudwatch_api_gateway_role.id + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs" +} diff --git a/infrastructure/stacks/account_policies/policies/ro_billing.json b/infrastructure/stacks/account_policies/policies/ro_billing.json new file mode 100644 index 00000000..5b762840 --- /dev/null +++ 
b/infrastructure/stacks/account_policies/policies/ro_billing.json @@ -0,0 +1,77 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "BillingReadOnly", + "Effect": "Allow", + "Action": [ + "account:GetAccountInformation", + "aws-portal:ViewBilling", + "billing:GetBillingData", + "billing:GetBillingDetails", + "billing:GetBillingNotifications", + "billing:GetBillingPreferences", + "billing:GetCredits", + "billing:GetContractInformation", + "billing:GetIAMAccessPreference", + "billing:GetSellerOfRecord", + "billing:ListBillingViews", + "budgets:ViewBudget", + "budgets:DescribeBudgetActionsForBudget", + "budgets:DescribeBudgetAction", + "budgets:DescribeBudgetActionsForAccount", + "budgets:DescribeBudgetActionHistories", + "ce:DescribeCostCategoryDefinition", + "ce:GetCostAndUsage", + "ce:ListCostCategoryDefinitions", + "ce:ListTagsForResource", + "ce:ListCostAllocationTags", + "ce:ListCostAllocationTagBackfillHistory", + "ce:GetTags", + "ce:GetDimensionValues", + "consolidatedbilling:ListLinkedAccounts", + "consolidatedbilling:GetAccountBillingRole", + "cur:GetClassicReport", + "cur:GetClassicReportPreferences", + "cur:GetUsageReport", + "cur:DescribeReportDefinitions", + "freetier:GetFreeTierAlertPreference", + "freetier:GetFreeTierUsage", + "invoicing:BatchGetInvoiceProfile", + "invoicing:GetInvoiceEmailDeliveryPreferences", + "invoicing:GetInvoicePDF", + "invoicing:GetInvoiceUnit", + "invoicing:ListInvoiceSummaries", + "invoicing:ListInvoiceUnits", + "invoicing:ListTagsForResource", + "mapcredits:ListQuarterSpend", + "mapcredits:ListAssociatedPrograms", + "mapcredits:ListQuarterCredits", + "payments:GetFinancingApplication", + "payments:GetFinancingLine", + "payments:GetFinancingLineWithdrawal", + "payments:GetFinancingOption", + "payments:GetPaymentInstrument", + "payments:GetPaymentStatus", + "payments:ListFinancingApplications", + "payments:ListFinancingLines", + "payments:ListFinancingLineWithdrawals", + "payments:ListPaymentInstruments", + "payments:ListPaymentPreferences", + "payments:ListPaymentProgramOptions", + "payments:ListPaymentProgramStatus", + "payments:ListTagsForResource", + "purchase-orders:GetPurchaseOrder", + "purchase-orders:ViewPurchaseOrders", + "purchase-orders:ListPurchaseOrderInvoices", + "purchase-orders:ListPurchaseOrders", + "purchase-orders:ListTagsForResource", + "sustainability:GetCarbonFootprintSummary", + "tax:GetTaxRegistrationDocument", + "tax:GetTaxInheritance", + "tax:ListTaxRegistrations" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_compute.json b/infrastructure/stacks/account_policies/policies/ro_compute.json new file mode 100644 index 00000000..8835147a --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_compute.json @@ -0,0 +1,33 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ComputeReadOnly", + "Effect": "Allow", + "Action": [ + "ec2:Describe*", + "ec2:GetSecurityGroupsForVpc", + "ec2:GetVerifiedAccessInstanceWebAcl", + "elasticloadbalancing:Describe*", + "autoscaling:Describe*", + "lambda:Get*", + "lambda:List*", + "states:DescribeStateMachine", + "states:ListStateMachines", + "tag:GetResources" + ], + "Resource": "*" + }, + { + "Sid": "RunAthenaLambdas", + "Effect": "Allow", + "Action": [ + "lambda:InvokeFunction" + ], + "Resource": [ + "arn:aws:lambda:*:*:function:athenafederatedcatalog_athena_federated_twr", + "arn:aws:lambda:*:*:function:athenafederatedcatalog_athena_federated_rds_twr" + ] + } + ] +} diff --git 
a/infrastructure/stacks/account_policies/policies/ro_data.json b/infrastructure/stacks/account_policies/policies/ro_data.json new file mode 100644 index 00000000..bd4c8c01 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_data.json @@ -0,0 +1,110 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "DataPipelineReadOnlyAccess", + "Action": [ + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:GetPipelineDefinition", + "datapipeline:ListPipelines", + "datapipeline:QueryObjects" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Sid": "KinesisReadOnlyAccess", + "Action": [ + "kinesis:ListStreams", + "kinesis:DescribeStream", + "kinesis:DescribeStreamSummary" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Sid": "ResourceGroupsReadOnlyAccess", + "Effect": "Allow", + "Action": [ + "resource-groups:ListGroups", + "resource-groups:ListGroupResources", + "resource-groups:GetGroup", + "resource-groups:GetGroupQuery" + ], + "Resource": "*" + }, + { + "Sid": "DynamoReadOnly", + "Effect": "Allow", + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:Describe*", + "dynamodb:List*", + "dynamodb:GetAbacStatus", + "dynamodb:GetItem", + "dynamodb:GetResourcePolicy", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:PartiQLSelect", + "dax:Describe*", + "dax:List*", + "dax:GetItem", + "dax:BatchGetItem", + "dax:Query", + "dax:Scan" + ], + "Resource": "*" + }, + { + "Sid": "RDSReadOnly", + "Effect": "Allow", + "Action": [ + "rds:Describe*", + "rds:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "S3ReadOnly", + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*", + "s3:Describe*", + "s3-object-lambda:Get*", + "s3-object-lambda:List*" + ], + "Resource": "*" + }, + { + "Sid": "AthenaLimitedAccess", + "Effect": "Allow", + "Action": [ + "athena:Get*", + "athena:List*", + "athena:StartQueryExecution", + "athena:UpdateNamedQuery", + "athena:StopQueryExecution", + "athena:CreatePreparedStatement", + "athena:UpdatePreparedStatement", + "athena:CreateNamedQuery", + "athena:CancelQueryExecution", + "athena:BatchGetNamedQuery" + ], + "Resource": "*" + }, + { + "Sid": "DMSReadOnlyAccess", + "Action": [ + "dms:Describe*", + "dms:List*", + "dms:TestConnection", + "dms:DescribeReplicationTasks", + "dms:DescribeEndpoints" + ], + "Effect": "Allow", + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_infrastructure_management.json b/infrastructure/stacks/account_policies/policies/ro_infrastructure_management.json new file mode 100644 index 00000000..b46331e3 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_infrastructure_management.json @@ -0,0 +1,120 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "WAReadOnly", + "Effect": "Allow", + "Action": [ + "wellarchitected:Get*", + "wellarchitected:List*", + "wellarchitected:ExportLens" + ], + "Resource": "*" + }, + { + "Sid": "CloudformationReadOnly", + "Effect": "Allow", + "Action": [ + "cloudformation:Describe*", + "cloudformation:EstimateTemplateCost", + "cloudformation:Get*", + "cloudformation:List*", + "cloudformation:ValidateTemplate", + "cloudformation:Detect*" + ], + "Resource": "*" + }, + { + "Sid": "TrustedAdvisorReadOnly", + "Effect": "Allow", + "Action": [ + "trustedadvisor:DescribeAccount*", + "trustedadvisor:DescribeOrganization", + "trustedadvisor:DescribeRisk*", + "trustedadvisor:DownloadRisk", + "trustedadvisor:DescribeNotificationConfigurations" + ], + "Resource": "*" + }, + { + "Sid": 
"OrganizationsTrustAdvisorReadOnly", + "Effect": "Allow", + "Action": [ + "organizations:ListDelegatedAdministrators" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "organizations:ServicePrincipal": [ + "reporting.trustedadvisor.amazonaws.com" + ] + } + } + }, + { + "Sid": "OrganizationsHealthReadOnly", + "Effect": "Allow", + "Action": [ + "organizations:EnableAWSServiceAccess", + "organizations:DisableAWSServiceAccess" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "organizations:ServicePrincipal": "health.amazonaws.com" + } + } + }, + { + "Sid": "AWSHealthReadOnly", + "Effect": "Allow", + "Action": [ + "health:*" + ], + "Resource": "*" + }, + { + "Sid": "OrganizationsReadOnly", + "Effect": "Allow", + "Action": [ + "organizations:ListAccounts", + "organizations:ListParents", + "organizations:DescribeAccount", + "organizations:ListDelegatedAdministrators", + "organizations:DescribeOrganization", + "organizations:ListAWSServiceAccessForOrganization", + "organizations:DescribeOrganizationalUnit" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "health.amazonaws.com" + } + } + }, + { + "Sid": "SupportReadOnlyAccess", + "Effect": "Allow", + "Action": [ + "support:DescribeAttachment", + "support:DescribeCases", + "support:DescribeCommunications", + "support:DescribeServices", + "support:DescribeSeverityLevels", + "support:DescribeSupportLevel", + "support:DescribeTrustedAdvisorCheck", + "support:DescribeTrustedAdvisorCheckRefreshStatuses", + "support:DescribeTrustedAdvisorCheckResult", + "support:DescribeTrustedAdvisorCheckSummaries", + "support:DescribeTrustedAdvisorChecks", + "support:SearchForCases" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_infrastructure_security.json b/infrastructure/stacks/account_policies/policies/ro_infrastructure_security.json new file mode 100644 index 00000000..a6a4de5c --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_infrastructure_security.json @@ -0,0 +1,70 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CloudTrailReadOnly", + "Effect": "Allow", + "Action": [ + "cloudtrail:Describe*", + "cloudtrail:Get*", + "cloudtrail:List*", + "cloudtrail:LookupEvents" + ], + "Resource": "*" + }, + { + "Sid": "SecurityHubReadOnly", + "Effect": "Allow", + "Action": [ + "securityhub:BatchGet*", + "securityhub:Describe*", + "securityhub:Get*", + "securityhub:List*" + ], + "Resource": "*" + }, + { + "Sid": "Inspector2AndCodeGuruSecurityReadOnly", + "Effect": "Allow", + "Action": [ + "inspector2:BatchGet*", + "inspector2:Describe*", + "inspector2:Get*", + "inspector2:List*", + "inspector2:Search*", + "codeguru-security:BatchGetFindings", + "codeguru-security:GetAccountConfiguration" + ], + "Resource": "*" + }, + { + "Sid": "GuardDutyReadOnly", + "Effect": "Allow", + "Action": [ + "guardduty:Describe*", + "guardduty:Get*", + "guardduty:List*" + ], + "Resource": "*" + }, + { + "Sid": "FirewallManagerReadOnly", + "Effect": "Allow", + "Action": [ + "fms:Get*", + "fms:List*" + ], + "Resource": "*" + }, + { + "Sid": "KMSKeyReadOnly", + "Effect": "Allow", + "Action": [ + "kms:ListAliases", + "kms:ListKeys", + "kms:DescribeKey" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_management.json b/infrastructure/stacks/account_policies/policies/ro_management.json new file mode 100644 index 
00000000..2e23ca29 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_management.json @@ -0,0 +1,144 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ApiGatewayReadOnly", + "Effect": "Allow", + "Action": "apigateway:GET", + "Resource": "arn:aws:apigateway:*::/*" + }, + { + "Sid": "EventBridgeAndSchedulerReadOnly", + "Effect": "Allow", + "Action": [ + "events:DescribeArchive", + "events:DescribeConnection", + "events:DescribeEndpoint", + "events:DescribeEventBus", + "events:DescribeEventSource", + "events:DescribeReplay", + "events:DescribeRule", + "events:ListApiDestinations", + "events:ListArchives", + "events:ListConnections", + "events:ListEndpoints", + "events:ListEventBuses", + "events:ListEventSources", + "events:ListReplays", + "events:ListRuleNamesByTarget", + "events:ListRules", + "events:ListTargetsByRule", + "events:TestEventPattern", + "pipes:DescribePipe", + "pipes:ListPipes", + "pipes:ListTagsForResource", + "scheduler:GetSchedule", + "scheduler:GetScheduleGroup", + "scheduler:ListScheduleGroups", + "scheduler:ListSchedules", + "scheduler:ListTagsForResource", + "schemas:DescribeCodeBinding", + "schemas:DescribeDiscoverer", + "schemas:DescribeRegistry", + "schemas:DescribeSchema", + "schemas:ExportSchema", + "schemas:GetCodeBindingSource", + "schemas:GetDiscoveredSchema", + "schemas:GetResourcePolicy", + "schemas:ListDiscoverers", + "schemas:ListRegistries", + "schemas:ListSchemaVersions", + "schemas:ListSchemas", + "schemas:ListTagsForResource", + "schemas:SearchSchemas" + ], + "Resource": "*" + }, + { + "Sid": "SSMReadOnly", + "Effect": "Allow", + "Action": [ + "ssm:Describe*", + "ssm:Get*", + "ssm:List*" + ], + "Resource": "*" + }, + { + "Sid": "OpenSearchServiceReadOnly", + "Effect": "Allow", + "Action": [ + "es:Describe*", + "es:Get*", + "es:List*", + "osis:Get*", + "osis:List*", + "opensearch:Describe*", + "opensearch:Get*", + "opensearch:List*", + "aoss:Describe*", + "aoss:Get*", + "aoss:List*" + ], + "Resource": "*" + }, + { + "Sid": "SQSReadOnly", + "Effect": "Allow", + "Action": [ + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ListDeadLetterSourceQueues", + "sqs:ListMessageMoveTasks", + "sqs:ListQueueTags", + "sqs:ListQueues" + ], + "Resource": "*" + }, + { + "Sid": "SNSReadOnly", + "Effect": "Allow", + "Action": [ + "sns:CheckIfPhoneNumberIsOptedOut", + "sns:Get*", + "sns:List*" + ], + "Resource": "*" + }, + { + "Sid": "SecretsManagerReadOnly", + "Effect": "Allow", + "Action": [ + "secretsmanager:BatchGetSecretValue", + "secretsmanager:DescribeSecret", + "secretsmanager:GetRandomPassword", + "secretsmanager:GetResourcePolicy", + "secretsmanager:GetSecretValue", + "secretsmanager:ListSecretVersionIds", + "secretsmanager:ListSecrets" + ], + "Resource": "*" + }, + { + "Sid": "AppConfigReadOnly", + "Effect": "Allow", + "Action": [ + "appconfig:GetApplication", + "appconfig:GetEnvironment", + "appconfig:GetConfiguration", + "appconfig:GetConfigurationProfile", + "appconfig:GetDeployment", + "appconfig:GetDeploymentStrategy", + "appconfig:GetHostedConfigurationVersion", + "appconfig:ListApplications", + "appconfig:ListConfigurationProfiles", + "appconfig:ListDeployments", + "appconfig:ListDeploymentStrategies", + "appconfig:ListEnvironments", + "appconfig:ListHostedConfigurationVersions", + "appconfig:ListTagsForResource" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_monitoring.json b/infrastructure/stacks/account_policies/policies/ro_monitoring.json new file mode 100644 index 
00000000..dbcace04 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_monitoring.json @@ -0,0 +1,57 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CloudWatchAndMonitoringReadOnly", + "Effect": "Allow", + "Action": [ + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "application-signals:BatchGet*", + "application-signals:Get*", + "application-signals:List*", + "cloudwatch:BatchGet*", + "cloudwatch:Describe*", + "cloudwatch:GenerateQuery", + "cloudwatch:Get*", + "cloudwatch:List*", + "cloudwatch:ListMetrics", + "cloudwatch:GetMetricStatistics", + "cloudwatch:GetMetricData", + "cloudwatch:DescribeAlarmHistory", + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:GetInsightRuleReport", + "logs:Describe*", + "logs:FilterLogEvents", + "logs:Get*", + "logs:List*", + "logs:StartLiveTail", + "logs:StartQuery", + "logs:StopLiveTail", + "logs:StopQuery", + "logs:TestMetricFilter", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "logs:DescribeQueries", + "logs:GetLogGroupFields", + "logs:GetLogRecord", + "logs:GetQueryResults", + "oam:ListSinks", + "rum:BatchGet*", + "rum:Get*", + "rum:List*", + "synthetics:Describe*", + "synthetics:Get*", + "synthetics:List*", + "xray:BatchGet*", + "xray:CancelTraceRetrieval", + "xray:Get*", + "xray:List*", + "xray:StartTraceRetrieval" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_networking.json b/infrastructure/stacks/account_policies/policies/ro_networking.json new file mode 100644 index 00000000..342233db --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_networking.json @@ -0,0 +1,40 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ACMCertificateReadOnly", + "Effect": "Allow", + "Action": [ + "acm:DescribeCertificate", + "acm:GetAccountConfiguration", + "acm:GetCertificate", + "acm:ListCertificates", + "acm:ListTagsForCertificate" + ], + "Resource": "*" + }, + { + "Sid": "CloudFrontReadOnly", + "Effect": "Allow", + "Action": [ + "cloudfront:Describe*", + "cloudfront:Get*", + "cloudfront:List*", + "cloudfront-keyvaluestore:Describe*", + "cloudfront-keyvaluestore:Get*", + "cloudfront-keyvaluestore:List*" + ], + "Resource": "*" + }, + { + "Sid": "Route53ReadOnly", + "Effect": "Allow", + "Action": [ + "route53:Get*", + "route53:List*", + "route53:TestDNSAnswer" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/ro_security.json b/infrastructure/stacks/account_policies/policies/ro_security.json new file mode 100644 index 00000000..2a412374 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/ro_security.json @@ -0,0 +1,148 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "IAMReadOnly", + "Effect": "Allow", + "Action": [ + "iam:Get*", + "iam:List*", + "iam:SimulateCustomPolicy", + "iam:SimulatePrincipalPolicy" + ], + "Resource": "*" + }, + { + "Sid": "WAFClassicReadOnly", + "Effect": "Allow", + "Action": [ + "waf:Get*", + "waf:List*", + "waf-regional:Get*", + "waf-regional:List*" + ], + "Resource": [ + "arn:aws:waf::*:bytematchset/*", + "arn:aws:waf::*:ipset/*", + "arn:aws:waf::*:ratebasedrule/*", + "arn:aws:waf::*:rule/*", + "arn:aws:waf::*:sizeconstraintset/*", + "arn:aws:waf::*:sqlinjectionset/*", + "arn:aws:waf::*:webacl/*", + "arn:aws:waf::*:xssmatchset/*", + "arn:aws:waf::*:regexmatch/*", + 
"arn:aws:waf::*:regexpatternset/*", + "arn:aws:waf::*:geomatchset/*", + "arn:aws:waf::*:rulegroup/*", + "arn:aws:waf:*:*:changetoken/*", + "arn:aws:waf-regional:*:*:bytematchset/*", + "arn:aws:waf-regional:*:*:ipset/*", + "arn:aws:waf-regional:*:*:ratebasedrule/*", + "arn:aws:waf-regional:*:*:rule/*", + "arn:aws:waf-regional:*:*:sizeconstraintset/*", + "arn:aws:waf-regional:*:*:sqlinjectionset/*", + "arn:aws:waf-regional:*:*:webacl/*", + "arn:aws:waf-regional:*:*:xssmatchset/*", + "arn:aws:waf-regional:*:*:regexmatch/*", + "arn:aws:waf-regional:*:*:regexpatternset/*", + "arn:aws:waf-regional:*:*:geomatchset/*", + "arn:aws:waf-regional:*:*:rulegroup/*", + "arn:aws:waf-regional:*:*:changetoken/*" + ] + }, + { + "Sid": "AllowWAFClassicGetWebACLForResource", + "Effect": "Allow", + "Action": [ + "waf-regional:GetWebACLForResource" + ], + "Resource": "arn:aws:waf-regional:*:*:*/*" + }, + { + "Sid": "AWSWAFV2ReadOnly", + "Effect": "Allow", + "Action": [ + "wafv2:Get*", + "wafv2:List*", + "wafv2:Describe*", + "wafv2:CheckCapacity" + ], + "Resource": [ + "arn:aws:wafv2:*:*:*/webacl/*/*", + "arn:aws:wafv2:*:*:*/ipset/*/*", + "arn:aws:wafv2:*:*:*/managedruleset/*/*", + "arn:aws:wafv2:*:*:*/rulegroup/*/*", + "arn:aws:wafv2:*:*:*/regexpatternset/*/*" + ] + }, + { + "Sid": "AllowListActionsForAppSync", + "Effect": "Allow", + "Action": [ + "appsync:ListGraphqlApis" + ], + "Resource": "*" + }, + { + "Sid": "AllowGetActionForCognito", + "Effect": "Allow", + "Action": [ + "cognito-idp:GetWebACLForResource" + ], + "Resource": "arn:aws:cognito-idp:*:*:userpool/*" + }, + { + "Sid": "AllowListActionsForCognito", + "Effect": "Allow", + "Action": [ + "cognito-idp:ListUserPools", + "cognito-idp:ListResourcesForWebACL" + ], + "Resource": "*" + }, + { + "Sid": "AllowGetActionForAppRunner", + "Effect": "Allow", + "Action": [ + "apprunner:DescribeWebAclForService" + ], + "Resource": "arn:aws:apprunner:*:*:service/*/*" + }, + { + "Sid": "AllowListActionsForAppRunner", + "Effect": "Allow", + "Action": [ + "apprunner:ListServices", + "apprunner:ListAssociatedServicesForWebAcl" + ], + "Resource": "*" + }, + { + "Sid": "AllowGetActionForAmplify", + "Effect": "Allow", + "Action": [ + "amplify:GetWebACLForResource" + ], + "Resource": "arn:aws:amplify:*:*:apps/*" + }, + { + "Sid": "AllowListActionsForAmplify", + "Effect": "Allow", + "Action": [ + "amplify:ListApps", + "amplify:ListResourcesForWebACL" + ], + "Resource": "*" + }, + { + "Sid": "ShieldReadOnly", + "Effect": "Allow", + "Action": [ + "shield:Describe*", + "shield:Get*", + "shield:List*" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_compute.json b/infrastructure/stacks/account_policies/policies/rw_compute.json new file mode 100644 index 00000000..b2345a89 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_compute.json @@ -0,0 +1,48 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ComputeFullAccess", + "Action": [ + "ec2:*", + "elasticloadbalancing:*", + "autoscaling:*", + "kms:ListAliases", + "lambda:*", + "states:DescribeStateMachine", + "states:ListStateMachines", + "tag:GetResources", + "tiros:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": [ + "autoscaling.amazonaws.com", + "ec2scheduled.amazonaws.com", + "elasticloadbalancing.amazonaws.com", + "spot.amazonaws.com", + "spotfleet.amazonaws.com", + "transitgateway.amazonaws.com" 
+ ] + } + } + }, + { + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:PassedToService": "lambda.amazonaws.com" + } + } + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_data.json b/infrastructure/stacks/account_policies/policies/rw_data.json new file mode 100644 index 00000000..55894853 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_data.json @@ -0,0 +1,149 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "DataPipelineFullAccess", + "Action": [ + "datapipeline:ActivatePipeline", + "datapipeline:CreatePipeline", + "datapipeline:DeletePipeline", + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:GetPipelineDefinition", + "datapipeline:ListPipelines", + "datapipeline:PutPipelineDefinition", + "datapipeline:QueryObjects" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Sid": "KinesisFullAccess", + "Action": [ + "kinesis:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Sid": "ResourceGroupsFullAccess", + "Effect": "Allow", + "Action": [ + "resource-groups:ListGroups", + "resource-groups:ListGroupResources", + "resource-groups:GetGroup", + "resource-groups:GetGroupQuery", + "resource-groups:DeleteGroup", + "resource-groups:CreateGroup" + ], + "Resource": "*" + }, + { + "Sid": "DynamoFullAccess", + "Effect": "Allow", + "Action": [ + "dynamodb:*", + "dax:*" + ], + "Resource": "*" + }, + { + "Sid": "PassRoleForScalingAndDAX", + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": "*", + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "application-autoscaling.amazonaws.com", + "application-autoscaling.amazonaws.com.cn", + "dax.amazonaws.com" + ] + } + } + }, + { + "Sid": "CreateServiceLinkedRoleForDynamoAndDAX", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": [ + "replication.dynamodb.amazonaws.com", + "dax.amazonaws.com", + "dynamodb.application-autoscaling.amazonaws.com", + "contributorinsights.dynamodb.amazonaws.com", + "kinesisreplication.dynamodb.amazonaws.com" + ] + } + } + }, + { + "Sid": "RDSFullAccess", + "Effect": "Allow", + "Action": [ + "rds:*" + ], + "Resource": "*" + }, + { + "Sid": "DMSFullAccess", + "Effect": "Allow", + "Action": [ + "dms:*" + ], + "Resource": "*" + }, + { + "Sid": "PerformanceInsightsFullAccess", + "Effect": "Allow", + "Action": "pi:*", + "Resource": [ + "arn:aws:pi:*:*:metrics/rds/*", + "arn:aws:pi:*:*:perf-reports/rds/*" + ] + }, + { + "Sid": "CreateServiceLinkedRoleForRDS", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "rds.amazonaws.com", + "rds.application-autoscaling.amazonaws.com" + ] + } + } + }, + { + "Sid": "S3FullAccess", + "Effect": "Allow", + "Action": [ + "s3:*", + "s3-object-lambda:*" + ], + "Resource": "*" + }, + { + "Sid": "GlueFullAccess", + "Effect": "Allow", + "Action": "glue:*", + "Resource": "*" + }, + { + "Sid": "AthenaFullAccess", + "Effect": "Allow", + "Action": "athena:*", + "Resource": "*" + }, + { + "Sid": "QuicksightFullAccess", + "Effect": "Allow", + "Action": "quicksight:*", + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_infrastructure_management.json b/infrastructure/stacks/account_policies/policies/rw_infrastructure_management.json new file mode 100644 index 00000000..08a936fd --- /dev/null +++ 
b/infrastructure/stacks/account_policies/policies/rw_infrastructure_management.json @@ -0,0 +1,150 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "WAReadFullAccess", + "Effect": "Allow", + "Action": [ + "wellarchitected:*" + ], + "Resource": "*" + }, + { + "Sid": "CloudformationFullAccess", + "Effect": "Allow", + "Action": [ + "cloudformation:*" + ], + "Resource": "*" + }, + { + "Sid": "TrustedAdvisorFullAccess", + "Effect": "Allow", + "Action": [ + "trustedadvisor:*" + ], + "Resource": "*" + }, + { + "Sid": "OrganizationsTrustAdvisorReadOnly", + "Effect": "Allow", + "Action": [ + "organizations:ListDelegatedAdministrators", + "organizations:EnableAWSServiceAccess", + "organizations:DisableAWSServiceAccess", + "organizations:RegisterDelegatedAdministrator", + "organizations:DeregisterDelegatedAdministrator" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "organizations:ServicePrincipal": [ + "reporting.trustedadvisor.amazonaws.com" + ] + } + } + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/reporting.trustedadvisor.amazonaws.com/AWSServiceRoleForTrustedAdvisorReporting", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "reporting.trustedadvisor.amazonaws.com" + } + } + }, + { + "Sid": "OrganizationsHealthReadOnly", + "Effect": "Allow", + "Action": [ + "organizations:EnableAWSServiceAccess", + "organizations:DisableAWSServiceAccess" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "organizations:ServicePrincipal": "health.amazonaws.com" + } + } + }, + { + "Sid": "AWSHealthReadOnly", + "Effect": "Allow", + "Action": [ + "health:*" + ], + "Resource": "*" + }, + { + "Sid": "OrganizationsReadOnly", + "Effect": "Allow", + "Action": [ + "organizations:ListParents", + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:DescribeOrganizationalUnit", + "organizations:ListAccounts", + "organizations:ListAWSServiceAccessForOrganization", + "organizations:ListDelegatedAdministrators" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "health.amazonaws.com" + } + } + }, + { + "Sid": "ServiceQuotasAccess", + "Effect": "Allow", + "Action": [ + "servicequotas:ListServices", + "servicequotas:ListServiceQuotas", + "servicequotas:ListAWSDefaultServiceQuotas", + "servicequotas:ListRequestedServiceQuotaChangeHistory", + "servicequotas:ListRequestedServiceQuotaChangeHistoryByQuota", + "servicequotas:ListTagsForResource", + "servicequotas:GetServiceQuota", + "servicequotas:GetAWSDefaultServiceQuota", + "servicequotas:GetRequestedServiceQuotaChange", + "servicequotas:RequestServiceQuotaIncrease", + "servicequotas:CreateSupportCase" + ], + "Resource": "*" + }, + { + "Sid": "SupportFullAccess", + "Effect": "Allow", + "Action": [ + "support:*", + "support-console:*" + ], + "Resource": "*" + } + , + { + "Sid": "IAMAccessAnalyzerFullAccess", + "Effect": "Allow", + "Action": [ + "access-analyzer:*" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "access-analyzer.amazonaws.com" + } + } + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_infrastructure_resilience.json b/infrastructure/stacks/account_policies/policies/rw_infrastructure_resilience.json new file mode 100644 index 
00000000..0f11505f --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_infrastructure_resilience.json @@ -0,0 +1,49 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AWSResilienceHubFullAccess", + "Effect": "Allow", + "Action": [ + "backup:DescribeBackupVault", + "backup:GetBackupPlan", + "backup:GetBackupSelection", + "backup:ListBackupPlans", + "backup:ListBackupSelections", + "datasync:DescribeTask", + "datasync:ListLocations", + "datasync:ListTasks", + "devops-guru:ListMonitoredResources", + "dlm:GetLifecyclePolicies", + "dlm:GetLifecyclePolicy", + "drs:DescribeJobs", + "drs:DescribeSourceServers", + "drs:GetReplicationConfiguration", + "ds:DescribeDirectories", + "fis:GetExperiment", + "fis:GetExperimentTemplate", + "fis:ListExperimentTemplates", + "fis:ListExperiments", + "fis:ListExperimentResolvedTargets", + "fsx:DescribeFileSystems", + "route53-recovery-control-config:ListClusters", + "route53-recovery-control-config:ListControlPanels", + "route53-recovery-control-config:ListRoutingControls", + "route53-recovery-readiness:GetReadinessCheckStatus", + "route53-recovery-readiness:GetResourceSet", + "route53-recovery-readiness:ListReadinessChecks", + "servicecatalog:GetApplication", + "servicecatalog:ListAssociatedResources" + ], + "Resource": "*" + }, + { + "Sid": "AWSResilienceHubSSMStatement", + "Effect": "Allow", + "Action": [ + "ssm:GetParametersByPath" + ], + "Resource": "arn:aws:ssm:*:*:parameter/ResilienceHub/*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_infrastructure_security.json b/infrastructure/stacks/account_policies/policies/rw_infrastructure_security.json new file mode 100644 index 00000000..e53c9e98 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_infrastructure_security.json @@ -0,0 +1,126 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CloudTrailFullAccess", + "Effect": "Allow", + "Action": "cloudtrail:*", + "Resource": "*" + }, + { + "Sid": "SecurityHubFullAccess", + "Effect": "Allow", + "Action": "securityhub:*", + "Resource": "*" + }, + { + "Sid": "Inspector2AndCodeGuruSecurityFullAccess", + "Effect": "Allow", + "Action": [ + "inspector2:*", + "codeguru-security:*" + ], + "Resource": "*" + }, + { + "Sid": "GuardDutyFullAccess", + "Effect": "Allow", + "Action": [ + "guardduty:*" + ], + "Resource": "*" + }, + { + "Sid": "FirewallFullAccess", + "Effect": "Allow", + "Action": [ + "fms:*" + ], + "Resource": "*" + }, + { + "Sid": "KMSManagementFullAccess", + "Effect": "Allow", + "Action": [ + "kms:*" + ], + "Resource": "*" + }, + { + "Sid": "SecurityHubServiceLinkedRole", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "securityhub.amazonaws.com" + } + } + }, + { + "Sid": "CreateServiceLinkedRoleGuardDuty", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "guardduty.amazonaws.com", + "malware-protection.guardduty.amazonaws.com" + ] + } + } + }, + { + "Sid": "AllowAccessToCreateInspectorSlr", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": [ + "agentless.inspector2.amazonaws.com", + "inspector2.amazonaws.com" + ] + } + } + }, + { + "Sid": "CreateSLRForACM", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": 
"arn:aws:iam::*:role/aws-service-role/acm.amazonaws.com/AWSServiceRoleForCertificateManager*", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "acm.amazonaws.com" + } + } + }, + { + "Sid": "ManageSLRForACM", + "Effect": "Allow", + "Action": [ + "iam:DeleteServiceLinkedRole" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/acm.amazonaws.com/AWSServiceRoleForCertificateManager*" + }, + { + "Sid": "ShieldFullAccess", + "Effect": "Allow", + "Action": [ + "shield:*" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/shield.amazonaws.com/AWSServiceRoleForAWSShield", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "shield.amazonaws.com" + } + } + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_management.json b/infrastructure/stacks/account_policies/policies/rw_management.json new file mode 100644 index 00000000..c4b9f41a --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_management.json @@ -0,0 +1,162 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ApiGatewayFullAccess", + "Effect": "Allow", + "Action": [ + "apigateway:*" + ], + "Resource": "arn:aws:apigateway:*::/*" + }, + { + "Sid": "ApiGatewayDomainNameAccess", + "Effect": "Allow", + "Action": [ + "apigateway:GET" + ], + "Resource": "arn:aws:apigateway:*:*:/domainnameaccessassociations" + }, + { + "Sid": "ApiGatewayExecuteFullAccess", + "Effect": "Allow", + "Action": [ + "execute-api:Invoke", + "execute-api:ManageConnections" + ], + "Resource": "arn:aws:execute-api:*:*:*" + }, + { + "Sid": "EventBridgeAndSchedulerFullAccess", + "Effect": "Allow", + "Action": [ + "events:*", + "schemas:*", + "scheduler:*", + "pipes:*" + ], + "Resource": "*" + }, + { + "Sid": "IAMCreateServiceLinkedRoleForApiDestinations", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/apidestinations.events.amazonaws.com/AWSServiceRoleForAmazonEventBridgeApiDestinations", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "apidestinations.events.amazonaws.com" + } + } + }, + { + "Sid": "IAMCreateServiceLinkedRoleForAmazonEventBridgeSchemas", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/schemas.amazonaws.com/AWSServiceRoleForSchemas", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "schemas.amazonaws.com" + } + } + }, + { + "Sid": "IAMPassRoleAccessForEventBridge", + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": "arn:aws:iam::*:role/*", + "Condition": { + "StringLike": { + "iam:PassedToService": "events.amazonaws.com" + } + } + }, + { + "Sid": "IAMPassRoleAccessForScheduler", + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": "arn:aws:iam::*:role/*", + "Condition": { + "StringLike": { + "iam:PassedToService": "scheduler.amazonaws.com" + } + } + }, + { + "Sid": "IAMPassRoleAccessForPipes", + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": "arn:aws:iam::*:role/*", + "Condition": { + "StringLike": { + "iam:PassedToService": "pipes.amazonaws.com" + } + } + }, + { + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "ssm.amazonaws.com" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "iam:DeleteServiceLinkedRole" + ], + 
"Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*" + }, + { + "Sid": "SSMFullAccess", + "Effect": "Allow", + "Action": [ + "ssm:*" + ], + "Resource": "*" + }, + { + "Sid": "OpenSearchServiceFullAccess", + "Effect": "Allow", + "Action": [ + "es:*", + "osis:*", + "opensearch:*", + "aoss:*" + ], + "Resource": "*" + }, + { + "Sid": "SQSFullAccess", + "Effect": "Allow", + "Action": [ + "sqs:*" + ], + "Resource": "*" + }, + { + "Sid": "SNSFullAccess", + "Effect": "Allow", + "Action": "sns:*", + "Resource": "*" + }, + { + "Sid": "SecretsManagerFullAccess", + "Effect": "Allow", + "Action": [ + "secretsmanager:*" + ], + "Resource": "*" + }, + { + "Sid": "AppConfigFullAccess", + "Effect": "Allow", + "Action": [ + "appconfig:*" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_monitoring.json b/infrastructure/stacks/account_policies/policies/rw_monitoring.json new file mode 100644 index 00000000..fb7505c8 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_monitoring.json @@ -0,0 +1,48 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CloudWatchFullAccess", + "Effect": "Allow", + "Action": [ + "application-autoscaling:*", + "application-signals:*", + "cloudwatch:*", + "logs:*", + "oam:ListSinks", + "rum:*", + "synthetics:*", + "xray:*" + ], + "Resource": "*" + }, + { + "Sid": "CloudWatchApplicationSignalsServiceLinkedRolePermissions", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/application-signals.cloudwatch.amazonaws.com/AWSServiceRoleForCloudWatchApplicationSignals", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "application-signals.cloudwatch.amazonaws.com" + } + } + }, + { + "Sid": "EventsServicePermissions", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/events.amazonaws.com/AWSServiceRoleForCloudWatchEvents*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "events.amazonaws.com" + } + } + }, + { + "Sid": "OAMReadPermissions", + "Effect": "Allow", + "Action": "oam:ListAttachedLinks", + "Resource": "arn:aws:oam:*:*:sink/*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_networking.json b/infrastructure/stacks/account_policies/policies/rw_networking.json new file mode 100644 index 00000000..050c2a98 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_networking.json @@ -0,0 +1,37 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Route53FullAccess", + "Effect": "Allow", + "Action": [ + "route53:*", + "route53domains:*" + ], + "Resource": "*" + }, + { + "Sid": "AllowAssumeMgmtR53Role", + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Resource": "arn:aws:iam::*:role/ftrs-directory-of-services-mgmt-domain-name-cross-account-access" + }, + { + "Sid": "ACMFullAccess", + "Effect": "Allow", + "Action": [ + "acm:*" + ], + "Resource": "*" + }, + { + "Sid": "CloudFrontFullAccess", + "Effect": "Allow", + "Action": [ + "cloudfront:*", + "cloudfront-keyvaluestore:*" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_policies/policies/rw_security.json b/infrastructure/stacks/account_policies/policies/rw_security.json new file mode 100644 index 00000000..7b573d61 --- /dev/null +++ b/infrastructure/stacks/account_policies/policies/rw_security.json @@ -0,0 +1,179 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": 
"IAMFullAccess", + "Effect": "Allow", + "Action": [ + "iam:GenerateCredentialReport", + "iam:List*", + "iam:GenerateServiceLastAccessedDetails", + "iam:TagRole", + "iam:DeletePolicy", + "iam:CreateRole", + "iam:DeleteRole", + "iam:AttachRolePolicy", + "iam:TagPolicy", + "iam:CreatePolicy", + "iam:PassRole", + "iam:Get*", + "iam:DetachRolePolicy", + "iam:SimulatePrincipalPolicy", + "iam:SimulateCustomPolicy", + "iam:CreatePolicyVersion", + "iam:DeletePolicyVersion", + "iam:TagOpenIDConnectProvider", + "iam:DeleteRolePolicy", + "iam:PutRolePolicy", + "iam:UpdateOpenIDConnectProviderThumbprint", + "iam:UntagPolicy", + "iam:UntagRole", + "iam:DeleteInstanceProfile" + ], + "Resource": "*" + }, + { + "Sid": "WAFClassicFullAccess", + "Effect": "Allow", + "Action": [ + "waf:*", + "waf-regional:*" + ], + "Resource": [ + "arn:aws:waf::*:bytematchset/*", + "arn:aws:waf::*:ipset/*", + "arn:aws:waf::*:ratebasedrule/*", + "arn:aws:waf::*:rule/*", + "arn:aws:waf::*:sizeconstraintset/*", + "arn:aws:waf::*:sqlinjectionset/*", + "arn:aws:waf::*:webacl/*", + "arn:aws:waf::*:xssmatchset/*", + "arn:aws:waf::*:regexmatch/*", + "arn:aws:waf::*:regexpatternset/*", + "arn:aws:waf::*:geomatchset/*", + "arn:aws:waf::*:rulegroup/*", + "arn:aws:waf:*:*:changetoken/*", + "arn:aws:waf-regional:*:*:bytematchset/*", + "arn:aws:waf-regional:*:*:ipset/*", + "arn:aws:waf-regional:*:*:ratebasedrule/*", + "arn:aws:waf-regional:*:*:rule/*", + "arn:aws:waf-regional:*:*:sizeconstraintset/*", + "arn:aws:waf-regional:*:*:sqlinjectionset/*", + "arn:aws:waf-regional:*:*:webacl/*", + "arn:aws:waf-regional:*:*:xssmatchset/*", + "arn:aws:waf-regional:*:*:regexmatch/*", + "arn:aws:waf-regional:*:*:regexpatternset/*", + "arn:aws:waf-regional:*:*:geomatchset/*", + "arn:aws:waf-regional:*:*:rulegroup/*", + "arn:aws:waf-regional:*:*:changetoken/*" + ] + }, + { + "Sid": "WAFV2FullAccess", + "Effect": "Allow", + "Action": [ + "wafv2:*" + ], + "Resource": [ + "arn:aws:wafv2:*:*:*/webacl/*/*", + "arn:aws:wafv2:*:*:*/ipset/*/*", + "arn:aws:wafv2:*:*:*/managedruleset/*/*", + "arn:aws:wafv2:*:*:*/rulegroup/*/*", + "arn:aws:wafv2:*:*:*/regexpatternset/*/*" + ] + }, + { + "Sid": "AllowDisassociateWebACL", + "Effect": "Allow", + "Action": [ + "wafv2:DisassociateWebACL" + ], + "Resource": "*" + }, + { + "Sid": "AllowActionsForAppSync", + "Effect": "Allow", + "Action": [ + "appsync:SetWebACL" + ], + "Resource": "arn:aws:appsync:*:*:apis/*" + }, + { + "Sid": "AllowListActionsForAppSync", + "Effect": "Allow", + "Action": [ + "appsync:ListGraphqlApis" + ], + "Resource": "*" + }, + { + "Sid": "AllowActionsForCognito", + "Effect": "Allow", + "Action": [ + "cognito-idp:AssociateWebACL", + "cognito-idp:DisassociateWebACL", + "cognito-idp:GetWebACLForResource" + ], + "Resource": "arn:aws:cognito-idp:*:*:userpool/*" + }, + { + "Sid": "AllowListActionsForCognito", + "Effect": "Allow", + "Action": [ + "cognito-idp:ListUserPools", + "cognito-idp:ListResourcesForWebACL" + ], + "Resource": "*" + }, + { + "Sid": "AllowActionsForAppRunner", + "Effect": "Allow", + "Action": [ + "apprunner:AssociateWebAcl", + "apprunner:DisassociateWebAcl", + "apprunner:DescribeWebAclForService" + ], + "Resource": "arn:aws:apprunner:*:*:service/*/*" + }, + { + "Sid": "AllowListActionsForAppRunner", + "Effect": "Allow", + "Action": [ + "apprunner:ListServices", + "apprunner:ListAssociatedServicesForWebAcl" + ], + "Resource": "*" + }, + { + "Sid": "AllowActionsForAmplify", + "Effect": "Allow", + "Action": [ + "amplify:AssociateWebACL", + "amplify:DisassociateWebACL", + 
"amplify:GetWebACLForResource" + ], + "Resource": "arn:aws:amplify:*:*:apps/*" + }, + { + "Sid": "AllowListActionsForAmplify", + "Effect": "Allow", + "Action": [ + "amplify:ListApps", + "amplify:ListResourcesForWebACL" + ], + "Resource": "*" + }, + { + "Sid": "AllowKMSDecryptDescribeKey", + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + "kms:GenerateDataKey", + "kms:ListAliases", + "kms:ListKeys" + ], + "Resource": "*" + } + ] +} diff --git a/infrastructure/stacks/account_wide/README.md b/infrastructure/stacks/account_wide/README.md new file mode 100644 index 00000000..181ee8d4 --- /dev/null +++ b/infrastructure/stacks/account_wide/README.md @@ -0,0 +1,77 @@ +# Account-Wide Infrastructure + +Infrastructure that is deployed once per environment inside an account. + +> Note: Deploy this stack using the `default` workspace. + +This stack provisions: + +1. IAM role for GitHub Actions (via OIDC) +2. Environment wide VPC, including public, private, and database subnets +3. A performance EC2 host for Apache `JMeter`–based testing + +--- + +## Performance EC2 (Apache `JMeter`) + +A single Amazon Linux 2023 EC2 instance in a private `subnet` for performance testing. Access is through AWS Systems Manager Session Manager (no inbound SSH). On first boot, user data installs Java, Apache `JMeter`, a minimal plugin set, an optional JWT library, and a convenience wrapper. + +### What this stack creates + +- EC2 instance in the first private `subnet` of the environment wide VPC +- Dedicated security group with minimal egress + - TCP 443 to 0.0.0.0/0 (HTTPS; required for downloads and APIs; private subnets egress via NAT) + - UDP 53 to the VPC Route 53 Resolver only (CIDR: `${cidrhost(var.vpc["cidr"], 2)}/32`) (DNS) + - UDP 123 to public NTP (CIDR: `0.0.0.0/0`) to allow time sync when link-local IPs cannot be referenced in code +- Dedicated NACLs with minimal DNS and NTP allowances (because NACLs are stateless) + - DNS: Outbound UDP 53 to the VPC resolver (`${cidrhost(var.vpc["cidr"], 2)}/32`), and inbound UDP 32768–65535 from the resolver + - NTP: Outbound UDP 123 to public NTP (`0.0.0.0/0`), and inbound UDP 32768–65535 from the internet (broader; see note below) +- IAM role and instance profile attached to the instance + - Managed policy: `AmazonSSMManagedInstanceCore` +- On first boot, user data installs: + - Java 17 (`Amazon Corretto` with OpenJDK fallback) and base tools + - Apache `JMeter` (from archive.apache.org) under `/opt/jmeter/current` + - Apache `JMeter` Plugin Manager 1.11 and `cmdrunner` 2.3 + - Default plugins: `jpgc-graphs-basic` and `jpgc-graphs-additional` + - JWT library JAR (version configurable) + - Symlinks: `/usr/local/bin/jmeter` and `/usr/local/bin/jmeter-server` + - Wrapper: `/usr/local/bin/jmeter-run` + - SSM Agent installed and enabled (falls back to regional S3 RPM if needed) + - Logs written to `/var/log/user-data.log` + +### Variables (selected) + +- `performance_instance_type` (string, default `t3.small`) +- `performance_volume_size` (number, default `30`; must be ≥ 30 GiB) +- `performance_version` (string, Apache `JMeter` version, default `5.6.3`) +- `performance_poweroff_after_setup` (true/false, default `true`) — power off after install to avoid idle cost +- `performance_ami_name_pattern` (list(string), default `['al2023-ami-*-x86_64']`) +- `performance_ami_architectures` (list(string), default `['x86_64']`) +- `performance_jwt_dependency_version` (string, default `4.5.0`) + +Set these in your environment tfvars, for example 
`infrastructure/environments/dev/account_wide.tfvars`. + +### Prerequisites + +- Outbound internet access from private subnets (typically via a NAT Gateway) so the instance can download Apache `JMeter`, plugins, and the JWT JAR +- Alternatively, configure VPC interface endpoints for SSM/SSMMessages/EC2Messages if operating without NAT +- The account-level GitHub runner role needs permission to create and pass the instance role/profile (configured centrally in this repository) + +### Usage + +- Plan and apply this stack with the account-wide tfvars and your environment tfvars +- After apply, connect using Session Manager from the AWS Console or CLI +- Validate installation on the instance: + - Run `jmeter -v` or execute tests with `jmeter-run` as needed + - Inspect `/var/log/user-data.log` for provisioning details + +### Notes on NTP configuration + +- Best practice is to restrict NTP to the AWS-provided link-local time sync endpoint, but where referencing a literal IP in code is prohibited, this configuration uses `0.0.0.0/0` for UDP 123 egress and the corresponding inbound ephemeral range in NACLs +- Consider tightening to the AWS time sync endpoint in a follow-up if policy allows referencing link-local IPs in infra code + +### Troubleshooting + +- If Apache `JMeter`, plugins, or the JWT JAR are missing, check `/var/log/user-data.log` and confirm outbound access +- Confirm the SSM Agent is running: `systemctl status amazon-ssm-agent` +- `JAVA_HOME` and `JMETER_HOME` are exported in profile scripts; the `jmeter-run` wrapper also sets them if missing diff --git a/infrastructure/stacks/account_wide/data.tf b/infrastructure/stacks/account_wide/data.tf new file mode 100644 index 00000000..ddc9fc48 --- /dev/null +++ b/infrastructure/stacks/account_wide/data.tf @@ -0,0 +1,12 @@ +data "aws_availability_zones" "available_azs" { + state = "available" + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} +data "aws_subnet" "vpc_private_subnets_by_count" { + count = length(module.vpc.private_subnets) + id = module.vpc.private_subnets[count.index] +} + diff --git a/infrastructure/stacks/account_wide/kms.tf b/infrastructure/stacks/account_wide/kms.tf new file mode 100644 index 00000000..816a9c69 --- /dev/null +++ b/infrastructure/stacks/account_wide/kms.tf @@ -0,0 +1,7 @@ +module "dynamodb_encryption_key" { + source = "../../modules/kms" + alias_name = local.kms_aliases.dynamodb + account_id = data.aws_caller_identity.current.account_id + aws_service_name = "dynamodb.amazonaws.com" + description = "Encryption key for DynamoDB tables in ${var.environment} environment" +} diff --git a/infrastructure/stacks/account_wide/s3.tf b/infrastructure/stacks/account_wide/s3.tf new file mode 100644 index 00000000..0b9ae562 --- /dev/null +++ b/infrastructure/stacks/account_wide/s3.tf @@ -0,0 +1,106 @@ +# VPC Flow Logs S3 Bucket and Resource Policy +module "vpc_flow_logs_s3_bucket" { + source = "../../modules/s3" + bucket_name = "${local.resource_prefix}-${var.vpc_flow_logs_bucket_name}" + versioning = var.flow_log_s3_versioning + force_destroy = var.flow_log_s3_force_destroy + lifecycle_rule_inputs = [ + { + id = "delete_logs_older_than_x_days" + enabled = true + filter = { + prefix = "" + } + expiration = { + days = var.flow_logs_s3_expiration_days + } + } + ] + s3_logging_bucket = local.s3_logging_bucket +} + +resource "aws_s3_bucket_policy" "vpc_flow_logs_s3_bucket_policy" { + bucket = module.vpc_flow_logs_s3_bucket.s3_bucket_id + policy = 
data.aws_iam_policy_document.vpc_flow_logs_s3_bucket_policy_doc.json +} + +data "aws_iam_policy_document" "vpc_flow_logs_s3_bucket_policy_doc" { + statement { + sid = "AWSLogDeliveryWrite" + + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + + actions = ["s3:PutObject"] + + resources = ["${module.vpc_flow_logs_s3_bucket.s3_bucket_arn}/*"] + } + + statement { + sid = "AWSLogDeliveryAclCheck" + + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + + actions = ["s3:GetBucketAcl"] + + resources = [module.vpc_flow_logs_s3_bucket.s3_bucket_arn] + } +} + +# Subnet Flow Logs S3 Bucket and Resource Policy +module "subnet_flow_logs_s3_bucket" { + source = "../../modules/s3" + bucket_name = "${local.resource_prefix}-${var.subnet_flow_logs_bucket_name}" + versioning = var.flow_log_s3_versioning + force_destroy = var.flow_log_s3_force_destroy + lifecycle_rule_inputs = [ + { + id = "delete_logs_older_than_x_days" + enabled = true + filter = { + prefix = "" + } + expiration = { + days = var.flow_logs_s3_expiration_days + } + } + ] +} + +resource "aws_s3_bucket_policy" "subnet_flow_logs_s3_bucket_policy" { + bucket = module.subnet_flow_logs_s3_bucket.s3_bucket_id + policy = data.aws_iam_policy_document.subnet_flow_logs_s3_bucket_policy_doc.json +} + +data "aws_iam_policy_document" "subnet_flow_logs_s3_bucket_policy_doc" { + statement { + sid = "AWSLogDeliveryWrite" + + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + + actions = ["s3:PutObject"] + + resources = ["${module.subnet_flow_logs_s3_bucket.s3_bucket_arn}/*"] + } + + statement { + sid = "AWSLogDeliveryAclCheck" + + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + + actions = ["s3:GetBucketAcl"] + + resources = [module.subnet_flow_logs_s3_bucket.s3_bucket_arn] + } +} diff --git a/infrastructure/stacks/account_wide/templates/performance_user_data.sh.tmpl b/infrastructure/stacks/account_wide/templates/performance_user_data.sh.tmpl new file mode 100644 index 00000000..f91b2ca3 --- /dev/null +++ b/infrastructure/stacks/account_wide/templates/performance_user_data.sh.tmpl @@ -0,0 +1,174 @@ +#!/bin/bash +set -euo pipefail + +# Log user-data to file as in gp_search +exec > >(tee -a /var/log/user-data.log) 2>&1 + +REGION="${aws_region}" +JVER="${performance_version}" +POWEROFF="${performance_poweroff_after_setup}" +JWT_VER="${performance_jwt_dependency_version}" + +: "$${JAVA_HOME:=}" # Predefine to avoid 'JAVA_HOME: unbound variable' under set -u + +# Update base packages (non-fatal if repos are temporarily unavailable) +if command -v dnf >/dev/null 2>&1; then + dnf -y update || true +elif command -v yum >/dev/null 2>&1; then + yum -y update || true +fi + +# Install Java 17 and base tools with fallback (dnf then yum) +BASE_PKGS="unzip jq curl wget tar openssh-clients make" +if command -v dnf >/dev/null 2>&1; then + dnf -y install $BASE_PKGS || true + dnf -y install java-17-amazon-corretto-headless || true + if ! command -v java >/dev/null 2>&1; then + dnf -y install java-17-openjdk-headless || true + fi +elif command -v yum >/dev/null 2>&1; then + yum -y install $BASE_PKGS || true + yum -y install java-17-amazon-corretto-headless || true + if ! command -v java >/dev/null 2>&1; then + yum -y install java-17-openjdk-headless || true + fi +fi + +# Install AWS CLI v2 (prefer package manager, fallback to official zip) +if ! 
command -v aws >/dev/null 2>&1; then
+  if command -v dnf >/dev/null 2>&1; then
+    dnf -y install awscli || true
+  elif command -v yum >/dev/null 2>&1; then
+    yum -y install awscli || true
+  fi
+fi
+if ! command -v aws >/dev/null 2>&1; then
+  TMPDIR=$(mktemp -d)
+  pushd "$TMPDIR" >/dev/null
+  curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+  unzip -q awscliv2.zip
+  ./aws/install --update
+  popd >/dev/null
+  rm -rf "$TMPDIR"
+fi
+
+# Set JAVA_HOME system-wide if java is present
+if command -v java >/dev/null 2>&1; then
+  JB=$(readlink -f "$(command -v java)" 2>/dev/null || command -v java)
+  JH=$(dirname "$(dirname "$JB")")
+  cat >/etc/profile.d/java_home.sh <<EOF
+export JAVA_HOME=$${JH}
+export PATH="\$JAVA_HOME/bin:\$PATH"
+EOF
+  chmod 0644 /etc/profile.d/java_home.sh
+fi
+
+# Export the default region for CLI sessions
+cat >/etc/profile.d/aws_default_region.sh <<EOF
+export AWS_DEFAULT_REGION=$${REGION}
+EOF
+chmod 0644 /etc/profile.d/aws_default_region.sh
+
+# Install Apache JMeter from archive.apache.org under /opt/jmeter
+JM_BASE=/opt/jmeter
+mkdir -p "$${JM_BASE}"
+curl -fL -o "/tmp/apache-jmeter-$${JVER}.tgz" \
+  "https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-$${JVER}.tgz"
+tar -xzf "/tmp/apache-jmeter-$${JVER}.tgz" -C "$${JM_BASE}"
+rm -f "/tmp/apache-jmeter-$${JVER}.tgz"
+ln -sfn "$${JM_BASE}/apache-jmeter-$${JVER}" "$${JM_BASE}/current"
+ln -sfn "$${JM_BASE}/apache-jmeter-$${JVER}" "$${JM_BASE}/apache-jmeter"
+
+cat >/etc/profile.d/jmeter.sh <<'ENVJ'
+export JMETER_HOME=/opt/jmeter/apache-jmeter
+export PATH="$JMETER_HOME/bin:$PATH"
+ENVJ
+chmod 0644 /etc/profile.d/jmeter.sh
+
+JM_HOME="$${JM_BASE}/current"
+
+# Ensure SSM Agent is installed and running with fallback to S3 RPM
+if command -v dnf >/dev/null 2>&1; then
+  dnf -y install amazon-ssm-agent || true
+elif command -v yum >/dev/null 2>&1; then
+  yum -y install amazon-ssm-agent || true
+fi
+if ! rpm -q amazon-ssm-agent >/dev/null 2>&1; then
+  rpm -Uvh --force "https://s3.$${REGION}.amazonaws.com/amazon-ssm-$${REGION}/latest/linux_amd64/amazon-ssm-agent.rpm" || true
+fi
+systemctl enable amazon-ssm-agent || true
+systemctl restart amazon-ssm-agent || systemctl start amazon-ssm-agent || true
+
+# Always install JMeter Plugin Manager, cmdrunner, and a sensible default plugin set
+PLUGINS_MANAGER_VERSION=1.11
+CMDRUNNER_VERSION=2.3
+DEFAULT_PLUGINS="jpgc-graphs-basic,jpgc-graphs-additional,jpgc-tst,jpgc-casutg"
+
+mkdir -p "$${JM_HOME}/lib/ext" "$${JM_HOME}/lib"
+curl -fL -o "$${JM_HOME}/lib/ext/jmeter-plugins-manager-$${PLUGINS_MANAGER_VERSION}.jar" \
+  "https://repo1.maven.org/maven2/kg/apc/jmeter-plugins-manager/$${PLUGINS_MANAGER_VERSION}/jmeter-plugins-manager-$${PLUGINS_MANAGER_VERSION}.jar"
+curl -fL -o "$${JM_HOME}/lib/cmdrunner-$${CMDRUNNER_VERSION}.jar" \
+  "https://repo1.maven.org/maven2/kg/apc/cmdrunner/$${CMDRUNNER_VERSION}/cmdrunner-$${CMDRUNNER_VERSION}.jar"
+
+# Setup PluginManagerCMD and install default plugins
+java -cp "$${JM_HOME}/lib/ext/jmeter-plugins-manager-$${PLUGINS_MANAGER_VERSION}.jar" \
+  org.jmeterplugins.repository.PluginManagerCMDInstaller || true
+bash "$${JM_HOME}/bin/PluginsManagerCMD.sh" install "$${DEFAULT_PLUGINS}" || true
+
+# Always install JWT dependency (used by IS_Proxy_Test_Plan)
+mkdir -p "$${JM_HOME}/lib"
+curl -fL -o "$${JM_HOME}/lib/java-jwt-$${JWT_VER}.jar" \
+  "https://repo1.maven.org/maven2/com/auth0/java-jwt/$${JWT_VER}/java-jwt-$${JWT_VER}.jar" || true
+
+# Provide convenience symlinks like gp_search
+ln -sf /opt/jmeter/apache-jmeter/bin/jmeter /usr/local/bin/jmeter
+ln -sf /opt/jmeter/apache-jmeter/bin/jmeter-server /usr/local/bin/jmeter-server
+
+# Always install a convenience wrapper for non-interactive runs
+cat >/usr/local/bin/jmeter-run <<'WRAP'
+#!/usr/bin/env bash
+set -euo pipefail
+# Ensure JAVA_HOME if not set
+if [[ -z "$${JAVA_HOME:-}" ]]; then
+  if command -v java >/dev/null 2>&1; then
+    JB=$(readlink -f "$(command -v java)" 2>/dev/null || command -v java)
+    export JAVA_HOME="$(dirname "$(dirname "$JB")")"
+    export PATH="$JAVA_HOME/bin:$PATH"
+  fi
+fi
+# Ensure JMETER_HOME if not set
+if [[ -z "$${JMETER_HOME:-}" ]]; then
+  export JMETER_HOME="/opt/jmeter/current"
+  export PATH="$JMETER_HOME/bin:$PATH"
+fi
+if ! command -v jmeter >/dev/null 2>&1; then
+  echo "jmeter not found in PATH" >&2
+  exit 1
+fi
+exec jmeter "$@"
+WRAP
+chmod +x /usr/local/bin/jmeter-run
+
+# Print JMeter version
+jmeter -v || true
+
+# Optionally power off after setup to avoid idle costs
+if [[ "$${POWEROFF}" == "true" ]]; then
+  shutdown -h +1 "Powering off after JMeter installation"
+fi
diff --git a/infrastructure/stacks/account_wide/variables.tf b/infrastructure/stacks/account_wide/variables.tf
new file mode 100644
index 00000000..79864000
--- /dev/null
+++ b/infrastructure/stacks/account_wide/variables.tf
@@ -0,0 +1,88 @@
+variable "enable_nat_gateway" {
+  description = "Whether to create a NAT Gateway for the VPC"
+  type        = bool
+}
+
+variable "single_nat_gateway" {
+  description = "Whether to use a single NAT Gateway in the VPC"
+  type        = bool
+}
+
+variable "one_nat_gateway_per_az" {
+  description = "Whether to create only one NAT Gateway per AZ"
+  type        = bool
+}
+
+variable "log_group_retention_in_days" {
+  description = "Number of days to retain logs"
+  default     = 7
+}
+
+variable "gateway_vpc_endpoint_type" {
+  description = "The VPC endpoint type"
+  type        = string
+  default     = "Gateway"
+}
+
+variable "private_dedicated_network_acl" {
+  description = "Whether to use dedicated network ACL (not default) and custom rules for private subnets"
+  type        = bool
+}
+
+variable "public_dedicated_network_acl" {
+  description = "Whether to use dedicated network ACL (not default) and custom rules for public subnets"
+  type        = bool
+}
+
+variable "flow_log_destination_type" {
+  description = "The destination type for the flow logs"
+  type        = string
+}
+
+variable "flow_log_file_format" {
+  description = "The file format for the flow logs"
+  type        = string
+}
+
+variable "vpc_flow_logs_bucket_name" {
+  description = "The VPC Flow logs bucket name"
+  type        = string
+}
+
+variable "subnet_flow_logs_bucket_name" {
+  description = "The Subnet Flow logs bucket name"
+  type        = string
+}
+
+variable "flow_log_s3_versioning" {
+  description = "Whether to enable versioning on the S3 bucket"
+  type        = bool
+}
+
+variable "flow_log_s3_force_destroy" {
+  description = "Whether to forcefully destroy the bucket when it contains objects"
+  type        = bool
+  default     = false
+}
+
+variable "flow_logs_s3_expiration_days" {
+  description = "The number of days before the VPC flow logs are deleted"
+  type        = number
+}
+
+variable "vpc" {
+  description = "A map of VPC configuration, including VPC ID, CIDR block, and other networking details"
+  type        = map(any)
+  default     = {}
+}
+
+variable "enable_flow_log" {
+  description = "Whether VPC Flow logs are enabled or not"
+  type        = bool
+  default     = false
+}
+
+variable "enable_s3_kms_encryption" {
+  description = "Whether to enable KMS encryption for S3 buckets"
+  type        = bool
+  default     = false
+}
diff --git a/infrastructure/stacks/account_wide/vpc.tf b/infrastructure/stacks/account_wide/vpc.tf
new file mode 100644
index 00000000..68977b25
--- /dev/null
+++ b/infrastructure/stacks/account_wide/vpc.tf
@@ -0,0 +1,149 @@
+# trivy:ignore:aws-vpc-no-public-ingress-acl : TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-575
+# trivy:ignore:aws-autoscaling-enable-at-rest-encryption : TODO https://nhsd-jira.digital.nhs.uk/browse/FDOS-575
+module "vpc" {
+  # Module version: 6.0.1
+  source = "git::https://github.com/terraform-aws-modules/terraform-aws-vpc.git?ref=a0307d4d1807de60b3868b96ef1b369808289157"
+
+  name               = "${local.account_prefix}-${var.vpc["name"]}"
+  cidr               = var.vpc["cidr"]
+  enable_nat_gateway = var.enable_nat_gateway
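+  # NAT sizing trade-off (behaviour of the upstream terraform-aws-modules/vpc
+  # module): with enable_nat_gateway = true, single_nat_gateway = true shares one
+  # NAT Gateway across all private subnets (cheapest, but single-AZ egress),
+  # whereas one_nat_gateway_per_az = true creates one per AZ for zonal
+  # resilience. Set these per environment in tfvars, e.g. single for dev,
+  # per-AZ for prod.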
single_nat_gateway = var.single_nat_gateway + one_nat_gateway_per_az = var.one_nat_gateway_per_az + + azs = slice(data.aws_availability_zones.available_azs.names, 0, 3) + public_subnets = local.public_subnets + private_subnets = local.private_subnets + #database_subnets = local.database_subnets + + # NACL configuration + private_dedicated_network_acl = var.private_dedicated_network_acl + private_inbound_acl_rules = local.network_acls["default_inbound"] + private_outbound_acl_rules = local.network_acls["default_outbound"] + + public_dedicated_network_acl = var.public_dedicated_network_acl + public_inbound_acl_rules = local.network_acls["default_inbound"] + public_outbound_acl_rules = local.network_acls["default_outbound"] + + # VPC Flow Logs + enable_flow_log = var.enable_flow_log + flow_log_destination_type = var.flow_log_destination_type + flow_log_destination_arn = module.vpc_flow_logs_s3_bucket.s3_bucket_arn + flow_log_file_format = var.flow_log_file_format + + # Manage Default NACL rules for the VPC + default_network_acl_ingress = [] + default_network_acl_egress = [] +} + +locals { + + public_subnets = [var.vpc["public_subnet_a"], var.vpc["public_subnet_b"], var.vpc["public_subnet_c"]] + private_subnets = [var.vpc["private_subnet_a"], var.vpc["private_subnet_b"], var.vpc["private_subnet_c"]] + + network_acls = { + + default_inbound = [ + { + rule_number = 900 + rule_action = "allow" + from_port = 0 + to_port = 65535 + protocol = "tcp" + cidr_block = "0.0.0.0/0" + }, + { + rule_number = 901 + rule_action = "allow" + from_port = 0 + to_port = 65535 + protocol = "tcp" + ipv6_cidr_block = "::/0" + }, + # DNS responses: Allow inbound UDP 32768–65535 only from the VPC resolver (base+2). + # Matches the outbound UDP 53 rule below. Required because NACLs are stateless and + # responses target an ephemeral source port on the instance. + { + rule_number = 902 + rule_action = "allow" + from_port = 32768 + to_port = 65535 + protocol = "udp" + cidr_block = format("%s/32", cidrhost(var.vpc["cidr"], 2)) + }, + # NTP responses: Allow inbound UDP 32768–65535 from the internet to support public NTP servers. + # Note: Broader than using the Amazon Time Sync IP; preferred only if a literal IP cannot be used. + # trivy:ignore:aws-vpc-no-public-ingress-acl : NTP inbound ephemeral (UDP 32768–65535) required for time sync responses + { + rule_number = 903 + rule_action = "allow" + from_port = 32768 + to_port = 65535 + protocol = "udp" + cidr_block = "0.0.0.0/0" + } + ] + + default_outbound = [ + { + rule_number = 900 + rule_action = "allow" + from_port = 0 + to_port = 65535 + protocol = "tcp" + cidr_block = "0.0.0.0/0" + }, + { + rule_number = 901 + rule_action = "allow" + from_port = 0 + to_port = 65535 + protocol = "tcp" + ipv6_cidr_block = "::/0" + }, + # DNS: Allow outbound UDP 53 only to the VPC resolver (base+2). + # Required for name resolution (e.g., SSM endpoints). Matching inbound UDP responses + # are permitted by the inbound ephemeral rule above (32768–65535 from the resolver). + { + rule_number = 902 + rule_action = "allow" + from_port = 53 + to_port = 53 + protocol = "udp" + cidr_block = format("%s/32", cidrhost(var.vpc["cidr"], 2)) + }, + # NTP: Allow outbound UDP 123 to the internet to support public NTP servers (e.g., time.aws.com, pool.ntp.org). + # Note: Broader than using the Amazon Time Sync IP; preferred only if a literal IP cannot be used. 
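+      # For reference, the tighter alternative (where link-local literals are
+      # permitted in code) would point this rule at the Amazon Time Sync Service:
+      #   cidr_block = "169.254.169.123/32"
+      # Since NACLs do not filter link-local traffic, both NTP rules could then
+      # likely be dropped altogether.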
+ # trivy:ignore:aws-vpc-no-public-egress-acl : NTP egress (UDP 123) required to reach public NTP servers when link-local IP is disallowed + { + rule_number = 903 + rule_action = "allow" + from_port = 123 + to_port = 123 + protocol = "udp" + cidr_block = "0.0.0.0/0" + } + ] + } +} + +resource "aws_flow_log" "public_subnet_flow_log_s3" { + count = length(local.public_subnets) + log_destination = module.subnet_flow_logs_s3_bucket.s3_bucket_arn + log_destination_type = var.flow_log_destination_type + traffic_type = "REJECT" + destination_options { + per_hour_partition = true + } + subnet_id = module.vpc.public_subnets[count.index] +} + +resource "aws_flow_log" "private_subnet_flow_log_s3" { + count = length(local.private_subnets) + log_destination = module.subnet_flow_logs_s3_bucket.s3_bucket_arn + log_destination_type = var.flow_log_destination_type + traffic_type = "REJECT" + destination_options { + per_hour_partition = true + } + subnet_id = module.vpc.private_subnets[count.index] +} diff --git a/infrastructure/stacks/account_wide/vpce.tf b/infrastructure/stacks/account_wide/vpce.tf new file mode 100644 index 00000000..a6d899c0 --- /dev/null +++ b/infrastructure/stacks/account_wide/vpce.tf @@ -0,0 +1,50 @@ +resource "aws_vpc_endpoint" "dynamodb_vpce" { + vpc_id = module.vpc.vpc_id + service_name = "com.amazonaws.${var.aws_region}.dynamodb" + vpc_endpoint_type = var.gateway_vpc_endpoint_type + route_table_ids = module.vpc.private_route_table_ids + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Sid = "AllowAccessFromVPC", + Effect = "Allow", + Principal = "*", + Action = "dynamodb:*", + Resource = "*", + Condition = { + StringEquals = { + "aws:SourceVpc" : module.vpc.vpc_id + } + } + }, + { + Sid = "DenyAccessFromOutsideVPC", + Effect = "Deny", + Principal = "*", + Action = "*", + Resource = "*", + Condition = { + StringNotEquals = { + "aws:SourceVpc" : module.vpc.vpc_id + } + } + } + ] + }) + + tags = { + Name = "${local.resource_prefix}-dynamodb-gateway-vpc-endpoint" + } +} + +resource "aws_vpc_endpoint" "s3_vpce" { + vpc_id = module.vpc.vpc_id + service_name = "com.amazonaws.${var.aws_region}.s3" + vpc_endpoint_type = var.gateway_vpc_endpoint_type + route_table_ids = module.vpc.private_route_table_ids + tags = { + Name = "${local.resource_prefix}-s3-gateway-vpc-endpoint" + } +} diff --git a/infrastructure/stacks/artefact_management/data.tf b/infrastructure/stacks/artefact_management/data.tf new file mode 100644 index 00000000..9aa3fd5f --- /dev/null +++ b/infrastructure/stacks/artefact_management/data.tf @@ -0,0 +1,18 @@ +data "aws_iam_role" "app_github_runner_iam_role" { + name = "${var.repo_name}-${var.app_github_runner_role_name}" +} + +variable "aws_account_id_dev" { + description = "AWS Account ID for dev environment" + type = string +} + +# variable "aws_account_id_test" { +# description = "AWS Account ID for test environment" +# type = string +# } + +# variable "aws_account_id_prod" { +# description = "AWS Account ID for prod environment" +# type = string +# } diff --git a/infrastructure/stacks/artefact_management/s3.tf b/infrastructure/stacks/artefact_management/s3.tf new file mode 100644 index 00000000..d406b9ea --- /dev/null +++ b/infrastructure/stacks/artefact_management/s3.tf @@ -0,0 +1,48 @@ +module "artefacts_bucket" { + source = "../../modules/s3" + bucket_name = local.artefacts_bucket +} + + +resource "aws_s3_bucket_policy" "artefacts_bucket_policy" { + depends_on = [module.artefacts_bucket] + bucket = local.artefacts_bucket + policy = 
data.aws_iam_policy_document.artefacts_bucket_policy.json
+}
+
+data "aws_iam_policy_document" "artefacts_bucket_policy" {
+  statement {
+    principals {
+      type = "AWS"
+      identifiers = [
+        "arn:aws:iam::${var.aws_account_id_dev}:role/${var.repo_name}-dev-${var.app_github_runner_role_name}"
+      ]
+    }
+    actions = [
+      "s3:ListBucket",
+    ]
+    resources = [
+      "${module.artefacts_bucket.s3_bucket_arn}"
+    ]
+  }
+
+  statement {
+    principals {
+      type = "AWS"
+      identifiers = [
+        "${data.aws_iam_role.app_github_runner_iam_role.arn}",
+        "arn:aws:iam::${var.aws_account_id_dev}:role/${var.repo_name}-dev-${var.app_github_runner_role_name}"
+      ]
+    }
+    actions = [
+      "s3:GetObject",
+      "s3:GetObjectTagging",
+      "s3:DeleteObject",
+      "s3:PutObject",
+      "s3:PutObjectTagging"
+    ]
+    resources = [
+      "${module.artefacts_bucket.s3_bucket_arn}/*",
+    ]
+  }
+}
diff --git a/infrastructure/stacks/github_runner/account_github_runner_policy.json.tpl b/infrastructure/stacks/github_runner/account_github_runner_policy.json.tpl
index dd9b87df..e8f4e234 100644
--- a/infrastructure/stacks/github_runner/account_github_runner_policy.json.tpl
+++ b/infrastructure/stacks/github_runner/account_github_runner_policy.json.tpl
@@ -48,6 +48,28 @@
         }
       }
     },
+    {
+      "Sid": "AllowAccessAnalyzerServiceLinkedRoleCreation",
+      "Effect": "Allow",
+      "Action": "iam:CreateServiceLinkedRole",
+      "Resource": "arn:aws:iam::*:role/aws-service-role/access-analyzer.amazonaws.com/AWSServiceRoleForAccessAnalyzer",
+      "Condition": {
+        "StringEquals": {
+          "iam:AWSServiceName": "access-analyzer.amazonaws.com"
+        }
+      }
+    },
+    {
+      "Sid": "AllowShieldServiceLinkedRoleCreation",
+      "Effect": "Allow",
+      "Action": "iam:CreateServiceLinkedRole",
+      "Resource": "arn:aws:iam::*:role/aws-service-role/shield.amazonaws.com/AWSServiceRoleForAWSShield",
+      "Condition": {
+        "StringEquals": {
+          "iam:AWSServiceName": "shield.amazonaws.com"
+        }
+      }
+    },
     {
       "Sid": "MonitoringFullAccess",
       "Effect": "Allow",
@@ -209,7 +231,7 @@
       "Resource": "*"
     },
     {
-      "Sid": "ShieldFullAccess",
+      "Sid": "AWSShieldFullAccess",
       "Effect": "Allow",
       "Action": [
         "shield:*"
@@ -256,6 +278,17 @@
           "iam:AWSServiceName": "inspector2.amazonaws.com"
         }
       }
+    },
+    {
+      "Sid": "AllowAccessAnalyser",
+      "Effect": "Allow",
+      "Action": "iam:CreateServiceLinkedRole",
+      "Resource": "arn:aws:iam::*:role/aws-service-role/access-analyzer.amazonaws.com/AWSServiceRoleForAccessAnalyzer",
+      "Condition": {
+        "StringEquals": {
+          "iam:AWSServiceName": "access-analyzer.amazonaws.com"
+        }
+      }
     }
   ]
 }
diff --git a/infrastructure/stacks/github_runner/app_github_runner_policy.json.tpl b/infrastructure/stacks/github_runner/app_github_runner_policy.json.tpl
index 79b251d2..59cbf8f4 100644
--- a/infrastructure/stacks/github_runner/app_github_runner_policy.json.tpl
+++ b/infrastructure/stacks/github_runner/app_github_runner_policy.json.tpl
@@ -226,7 +226,7 @@
       "Resource": "*"
     },
     {
-      "Sid": "ShieldFullAccess",
+      "Sid": "AWSShieldFullAccess",
       "Effect": "Allow",
       "Action": [
         "shield:*"
diff --git a/infrastructure/stacks/github_runner/iam_role.tf b/infrastructure/stacks/github_runner/iam_role.tf
index c988f4ac..44e5ec44 100644
--- a/infrastructure/stacks/github_runner/iam_role.tf
+++ b/infrastructure/stacks/github_runner/iam_role.tf
@@ -62,13 +62,13 @@ EOF
 }
 
 resource "aws_iam_policy" "account_github_runner_policy" {
-  name        = "${var.repo_name}${var.environment != "mgmt" ? 
"-${var.environment}" : ""}-${var.account_github_runner_role_name}" + name = "${var.repo_name}-${var.environment}-${var.account_github_runner_role_name}" description = "IAM policy for Account GitHub Actions runner" policy = jsonencode(local.account_github_runner_policy) } resource "aws_iam_policy" "app_github_runner_policy" { - name = "${var.repo_name}${var.environment != "mgmt" ? "-${var.environment}" : ""}-${var.app_github_runner_role_name}" + name = "${var.repo_name}-${var.environment}-${var.app_github_runner_role_name}" description = "IAM policy for App GitHub Actions runner" policy = jsonencode(local.app_github_runner_policy) } @@ -83,44 +83,44 @@ resource "aws_iam_role_policy_attachment" "app_github_runner_policy_attachment" policy_arn = aws_iam_policy.app_github_runner_policy.arn } -# data "aws_iam_policy_document" "app_runner_domain_name_cross_account_doc" { -# statement { -# effect = "Allow" -# actions = ["sts:AssumeRole"] -# resources = [ -# "arn:aws:iam::${var.mgmt_account_id}:role/${local.domain_cross_account_role}" -# ] -# } -# } - -# resource "aws_iam_policy" "app_runner_domain_name_cross_account_policy" { -# name = "${local.domain_cross_account_role}${var.environment != "mgmt" ? "-${var.environment}" : ""}-app-policy" -# description = "Allow cross-account AssumeRole into mgmt Route53 role" -# policy = data.aws_iam_policy_document.app_runner_domain_name_cross_account_doc.json -# } - -# resource "aws_iam_role_policy_attachment" "app_runner_domain_name_cross_account_policy_attachment" { -# role = aws_iam_role.app_github_runner_role.name -# policy_arn = aws_iam_policy.app_runner_domain_name_cross_account_policy.arn -# } - -# data "aws_iam_policy_document" "account_runner_domain_name_cross_account_doc" { -# statement { -# effect = "Allow" -# actions = ["sts:AssumeRole"] -# resources = [ -# "arn:aws:iam::${var.mgmt_account_id}:role/${local.domain_cross_account_role}" -# ] -# } -# } - -# resource "aws_iam_policy" "account_runner_domain_name_cross_account_policy" { -# name = "${local.domain_cross_account_role}${var.environment != "mgmt" ? "-${var.environment}" : ""}-account-policy" -# description = "Allow cross-account AssumeRole into mgmt Route53 role" -# policy = data.aws_iam_policy_document.account_runner_domain_name_cross_account_doc.json -# } - -# resource "aws_iam_role_policy_attachment" "account_runner_domain_name_cross_account_policy_attachment" { -# role = aws_iam_role.account_github_runner_role.name -# policy_arn = aws_iam_policy.account_runner_domain_name_cross_account_policy.arn -# } +data "aws_iam_policy_document" "app_runner_domain_name_cross_account_doc" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + resources = [ + "arn:aws:iam::${var.mgmt_account_id}:role/${local.domain_cross_account_role}" + ] + } +} + +resource "aws_iam_policy" "app_runner_domain_name_cross_account_policy" { + name = "${local.domain_cross_account_role}${var.environment != "mgmt" ? 
"-${var.environment}" : ""}-app-policy" + description = "Allow cross-account AssumeRole into mgmt Route53 role" + policy = data.aws_iam_policy_document.app_runner_domain_name_cross_account_doc.json +} + +resource "aws_iam_role_policy_attachment" "app_runner_domain_name_cross_account_policy_attachment" { + role = aws_iam_role.app_github_runner_role.name + policy_arn = aws_iam_policy.app_runner_domain_name_cross_account_policy.arn +} + +data "aws_iam_policy_document" "account_runner_domain_name_cross_account_doc" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + resources = [ + "arn:aws:iam::${var.mgmt_account_id}:role/${local.domain_cross_account_role}" + ] + } +} + +resource "aws_iam_policy" "account_runner_domain_name_cross_account_policy" { + name = "${local.domain_cross_account_role}${var.environment != "mgmt" ? "-${var.environment}" : ""}-account-policy" + description = "Allow cross-account AssumeRole into mgmt Route53 role" + policy = data.aws_iam_policy_document.account_runner_domain_name_cross_account_doc.json +} + +resource "aws_iam_role_policy_attachment" "account_runner_domain_name_cross_account_policy_attachment" { + role = aws_iam_role.account_github_runner_role.name + policy_arn = aws_iam_policy.account_runner_domain_name_cross_account_policy.arn +} diff --git a/infrastructure/stacks/github_runner/oidc-provider.tf b/infrastructure/stacks/github_runner/oidc-provider.tf new file mode 100644 index 00000000..2d886efb --- /dev/null +++ b/infrastructure/stacks/github_runner/oidc-provider.tf @@ -0,0 +1,9 @@ +resource "aws_iam_openid_connect_provider" "github_provider" { + url = var.oidc_provider_url + client_id_list = [ + var.oidc_client, + ] + thumbprint_list = [ + var.oidc_thumbprint + ] +} diff --git a/infrastructure/stacks/github_runner/variables.tf b/infrastructure/stacks/github_runner/variables.tf index 55f2552d..fca9158e 100644 --- a/infrastructure/stacks/github_runner/variables.tf +++ b/infrastructure/stacks/github_runner/variables.tf @@ -2,3 +2,13 @@ variable "github_org" { description = "The name of the git hub organisation - eg NHSDigital" } + +variable "oidc_provider_url" { + description = "Url of oidc provider" +} +variable "oidc_client" { + description = "Client of oidc provider - eg aws" +} +variable "oidc_thumbprint" { + description = "Thumbprint for oidc provider" +} diff --git a/infrastructure/stacks/triage/data.tf b/infrastructure/stacks/triage/data.tf new file mode 100644 index 00000000..d20342c1 --- /dev/null +++ b/infrastructure/stacks/triage/data.tf @@ -0,0 +1,35 @@ +data "aws_vpc" "vpc" { + filter { + name = "tag:Name" + values = ["${local.account_prefix}-vpc"] + } +} + +data "aws_subnets" "private_subnets" { + filter { + name = "vpc-id" + values = [data.aws_vpc.vpc.id] + } + + filter { + name = "tag:Name" + values = ["${local.account_prefix}-vpc-private-*"] + } + + filter { + name = "tag:CidrRange" + values = [var.vpc_private_subnet_cidr_range] + } +} + +data "aws_subnet" "private_subnets_details" { + for_each = toset(data.aws_subnets.private_subnets.ids) + id = each.value +} +data "aws_prefix_list" "dynamodb" { + name = "com.amazonaws.${var.aws_region}.dynamodb" +} + +data "aws_prefix_list" "s3" { + name = "com.amazonaws.${var.aws_region}.s3" +} diff --git a/infrastructure/stacks/triage/dynamodb.tf b/infrastructure/stacks/triage/dynamodb.tf new file mode 100644 index 00000000..34fc32b7 --- /dev/null +++ b/infrastructure/stacks/triage/dynamodb.tf @@ -0,0 +1,33 @@ +module "starting_coords" { + source = "../../modules/dynamodb" + table_name = 
"${local.resource_prefix}-StartingCoords" + hash_key = "Skillset" + range_key = "GenderAgeParty" + + attributes = [ + { name = "Skillset", type = "S" }, + { name = "GenderAgeParty", type = "S" } + ] +} + +module "triage_nodes" { + source = "../../modules/dynamodb" + table_name = "${local.resource_prefix}-TriageNodes" + hash_key = "Coordinate" + + attributes = [{ + name = "Coordinate" + type = "S" + }] +} + +module "bodymaps" { + source = "../../modules/dynamodb" + table_name = "${local.resource_prefix}-BodyMaps" + hash_key = "id" + + attributes = [{ + name = "id" + type = "S" + }] +} diff --git a/infrastructure/stacks/triage/iam.tf b/infrastructure/stacks/triage/iam.tf new file mode 100644 index 00000000..5a09ac8b --- /dev/null +++ b/infrastructure/stacks/triage/iam.tf @@ -0,0 +1,97 @@ +# locals { +# s3_lambda_policy = jsondecode(templatefile("${path.module}/s3_lambda_policy.json.tpl", { +# project = var.project +# repo_name = var.repo_name +# })) + +# dynamodb_policy = jsondecode(templatefile("${path.module}/apig_lambda_policy.json.tpl", { +# project = var.project +# repo_name = var.repo_name +# })) +# } + + +resource "aws_iam_role" "lambda_role" { + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Effect = "Allow", + Principal = { + Service = "lambda.amazonaws.com" + }, + Action = "sts:AssumeRole" + }] + }) + name = "${local.resource_prefix}-lambda-role${local.workspace_suffix}" +} + +resource "aws_iam_policy" "s3_access" { + name = "${local.resource_prefix}-s3-access${local.workspace_suffix}" + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = [ + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + Resource = [ + # bucket itself + "arn:aws:s3:::${module.pathway_artifact_bucket.s3_bucket_id}", + # all objects inside + "arn:aws:s3:::${module.pathway_artifact_bucket.s3_bucket_id}/*" + ] + }, + ] + }) +} + +resource "aws_iam_policy" "ddb_access" { + name = "${local.resource_prefix}-dynamo-db-access${local.workspace_suffix}" + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = [ + "dynamodb:GetItem", + "dynamodb:BatchGetItem", + "dynamodb:BatchWriteItem", + "dynamodb:Query", + "dynamodb:Scan" + ], + Resource = concat( + [ + module.starting_coords.dynamodb_table_arn, + module.triage_nodes.dynamodb_table_arn, + module.bodymaps.dynamodb_table_arn + ], + [ + "${module.starting_coords.dynamodb_table_arn}/index/*", + "${module.triage_nodes.dynamodb_table_arn}/index/*", + "${module.bodymaps.dynamodb_table_arn}/index/*" + ] + ) + }] + }) +} + +resource "aws_iam_role_policy_attachment" "s3_attach" { + role = aws_iam_role.lambda_role.name + policy_arn = aws_iam_policy.s3_access.arn +} + +resource "aws_iam_role_policy_attachment" "ddb_aatach" { + role = aws_iam_role.lambda_role.name + policy_arn = aws_iam_policy.ddb_access.arn +} + +# resource "aws_iam_role_policy_attachment" "attachments" { +# for_each = toset(var.policy_arns) + +# role = aws_iam_role.lambda_role.name +# policy_arn = each.value +# } + diff --git a/infrastructure/stacks/triage/lambda.tf b/infrastructure/stacks/triage/lambda.tf new file mode 100644 index 00000000..ebaae372 --- /dev/null +++ b/infrastructure/stacks/triage/lambda.tf @@ -0,0 +1,61 @@ + +module "s3lambda" { + source = "../../modules/lambda" + aws_region = var.aws_region + function_name = "${local.resource_prefix}-s3-Lambda" + policy_jsons = [aws_iam_policy.s3_access.policy, aws_iam_policy.ddb_access.policy] + handler = 
"s3_configurator.handler" + s3_bucket_name = local.artefacts_bucket + runtime = var.runtime + s3_key = var.s3_key + memory_size = var.mem_size + timeout = 60 + description = "Lambda function for S3 event processing in SAET Triage API" + + subnet_ids = [for subnet in data.aws_subnet.private_subnets_details : subnet.id] + security_group_ids = [aws_security_group.triage_api_lambda_security_group.id] + vpc_id = data.aws_vpc.vpc.id + + environment_variables = { + "ENVIRONMENT" = var.environment + "WORKSPACE" = terraform.workspace == "default" ? "" : terraform.workspace + "DATA_RELEASE_BUCKET" = module.pathway_artifact_bucket.s3_bucket_id + "BODY_MAP_NODE_TABLE" = module.bodymaps.dynamodb_table_name + "STARTING_NODE_TABLE" = module.starting_coords.dynamodb_table_name + "TRIAGE_NODE_TABLE" = module.triage_nodes.dynamodb_table_name + } + account_id = data.aws_caller_identity.current.account_id +} + +module "apiglambda" { + source = "../../modules/lambda" + aws_region = var.aws_region + function_name = "${local.resource_prefix}-apig-Lambda" + policy_jsons = [aws_iam_policy.ddb_access.policy, aws_iam_policy.s3_access.policy] + handler = "api_gateway_configurator.handler" + s3_bucket_name = local.artefacts_bucket + runtime = var.runtime + s3_key = var.s3_key + memory_size = var.mem_size + description = "Lambda function for App gateway processing event in SAET Triage API" + + subnet_ids = [for subnet in data.aws_subnet.private_subnets_details : subnet.id] + security_group_ids = [aws_security_group.triage_api_lambda_security_group.id] + vpc_id = data.aws_vpc.vpc.id + + environment_variables = { + "ENVIRONMENT" = var.environment + "WORKSPACE" = terraform.workspace == "default" ? "" : terraform.workspace + "DATA_RELEASE_BUCKET" = module.pathway_artifact_bucket.s3_bucket_id + "BODY_MAP_NODE_TABLE" = module.bodymaps.dynamodb_table_name + "STARTING_NODE_TABLE" = module.starting_coords.dynamodb_table_name + "TRIAGE_NODE_TABLE" = module.triage_nodes.dynamodb_table_name + } + account_id = data.aws_caller_identity.current.account_id +} + +resource "aws_lambda_alias" "apiglambda_live" { + name = "live" + function_name = module.apiglambda.lambda_function_name + function_version = module.apiglambda.lambda_function_version +} diff --git a/infrastructure/stacks/triage/restapi.tf b/infrastructure/stacks/triage/restapi.tf new file mode 100644 index 00000000..cd878eb0 --- /dev/null +++ b/infrastructure/stacks/triage/restapi.tf @@ -0,0 +1,97 @@ +locals { + api_method = "POST" + path_parts = ["FHIR", "R4", "triage"] +} + +resource "aws_api_gateway_rest_api" "triage" { + name = "${var.api_name}${local.workspace_suffix}" +} + +resource "aws_api_gateway_resource" "fhir" { + rest_api_id = aws_api_gateway_rest_api.triage.id + parent_id = aws_api_gateway_rest_api.triage.root_resource_id + path_part = local.path_parts[0] +} + +resource "aws_api_gateway_resource" "r4" { + rest_api_id = aws_api_gateway_rest_api.triage.id + parent_id = aws_api_gateway_resource.fhir.id + path_part = local.path_parts[1] +} + +resource "aws_api_gateway_resource" "triage" { + rest_api_id = aws_api_gateway_rest_api.triage.id + parent_id = aws_api_gateway_resource.r4.id + path_part = local.path_parts[2] +} + +resource "aws_api_gateway_method" "triage" { + rest_api_id = aws_api_gateway_rest_api.triage.id + resource_id = aws_api_gateway_resource.triage.id + http_method = local.api_method + authorization = var.authorization +} + +# Create method responses for the API methods +resource "aws_api_gateway_method_response" "triage" { + rest_api_id = 
aws_api_gateway_rest_api.triage.id + resource_id = aws_api_gateway_resource.triage.id + http_method = aws_api_gateway_method.triage.http_method + status_code = "200" +} + +resource "aws_api_gateway_integration" "triage" { + rest_api_id = aws_api_gateway_rest_api.triage.id + resource_id = aws_api_gateway_resource.triage.id + http_method = aws_api_gateway_method.triage.http_method + integration_http_method = var.int_http_method + type = var.type + uri = aws_lambda_alias.apiglambda_live.invoke_arn +} + +# Create integration responses for the API methods +resource "aws_api_gateway_integration_response" "triage" { + rest_api_id = aws_api_gateway_rest_api.triage.id + resource_id = aws_api_gateway_resource.triage.id + http_method = aws_api_gateway_method.triage.http_method + status_code = aws_api_gateway_method_response.triage.status_code + selection_pattern = "" + + depends_on = [aws_api_gateway_integration.triage] +} + +# Create a deployment to publish the API +resource "aws_api_gateway_deployment" "deployment" { + rest_api_id = aws_api_gateway_rest_api.triage.id + + depends_on = [ + aws_api_gateway_integration.triage, + aws_api_gateway_integration_response.triage, + aws_api_gateway_method.triage + ] + + # Force redeployment when any API resources change + triggers = { + redeployment = sha1(jsonencode({ + resources = { + fhir = aws_api_gateway_resource.fhir + r4 = aws_api_gateway_resource.r4 + triage = aws_api_gateway_resource.triage + } + methods = aws_api_gateway_method.triage + integrations = aws_api_gateway_integration.triage + integration_responses = aws_api_gateway_integration_response.triage + })) + } + + lifecycle { + create_before_destroy = true + } +} + +# Create a stage (environment) for the API +resource "aws_api_gateway_stage" "stage" { + deployment_id = aws_api_gateway_deployment.deployment.id + rest_api_id = aws_api_gateway_rest_api.triage.id + stage_name = var.stage_name +} diff --git a/infrastructure/stacks/triage/s3.tf b/infrastructure/stacks/triage/s3.tf new file mode 100644 index 00000000..4f8ff06f --- /dev/null +++ b/infrastructure/stacks/triage/s3.tf @@ -0,0 +1,4 @@ +module "pathway_artifact_bucket" { + source = "../../modules/s3" + bucket_name = "${local.resource_prefix}-artifact${local.workspace_suffix}" +} diff --git a/infrastructure/stacks/triage/security_group.tf b/infrastructure/stacks/triage/security_group.tf new file mode 100644 index 00000000..ad3faa11 --- /dev/null +++ b/infrastructure/stacks/triage/security_group.tf @@ -0,0 +1,25 @@ +resource "aws_security_group" "triage_api_lambda_security_group" { + # checkov:skip=CKV2_AWS_5: False positive due to module reference + name = "${local.resource_prefix}-${var.processor_lambda_name}${local.workspace_suffix}-sg" + description = "Security group for processor lambda" + + vpc_id = data.aws_vpc.vpc.id +} + +resource "aws_vpc_security_group_egress_rule" "allow_dynamodb_access_from_organisation_api" { + security_group_id = aws_security_group.triage_api_lambda_security_group.id + description = "Organisation api egress rule to allow DynamoDB traffic" + prefix_list_id = data.aws_prefix_list.dynamodb.id + ip_protocol = "tcp" + from_port = var.https_port + to_port = var.https_port +} + +resource "aws_vpc_security_group_egress_rule" "allow_s3_access_from_processor_lambda" { + security_group_id = aws_security_group.triage_api_lambda_security_group.id + description = "Processor lambda egress rule to allow S3 traffic" + prefix_list_id = data.aws_prefix_list.s3.id + ip_protocol = "tcp" + from_port = var.https_port + to_port = 
var.https_port +} diff --git a/infrastructure/stacks/triage/trigger.tf b/infrastructure/stacks/triage/trigger.tf new file mode 100644 index 00000000..3311e8b4 --- /dev/null +++ b/infrastructure/stacks/triage/trigger.tf @@ -0,0 +1,30 @@ +# S3 Lambda trigger for s3lambda +resource "aws_lambda_permission" "allows3" { + statement_id = "AllowExecutionFromS3" + action = "lambda:InvokeFunction" + function_name = module.s3lambda.lambda_function_name + principal = "s3.amazonaws.com" + source_arn = module.pathway_artifact_bucket.s3_bucket_arn +} + +resource "aws_s3_bucket_notification" "bucket_notification" { + bucket = module.pathway_artifact_bucket.s3_bucket_id + + lambda_function { + lambda_function_arn = module.s3lambda.lambda_function_arn + events = ["s3:ObjectCreated:*"] + #filter_prefix = "uploads/" + } + + depends_on = [aws_lambda_permission.allows3] +} + +# API Gateway Lambda trigger for apiglambda +resource "aws_lambda_permission" "allowapig" { + statement_id = "AllowExecutionFromAPIGateway" + action = "lambda:InvokeFunction" + function_name = module.apiglambda.lambda_function_name + principal = "apigateway.amazonaws.com" + source_arn = "${aws_api_gateway_rest_api.triage.execution_arn}/*/${local.api_method}/${join("/", local.path_parts)}" + qualifier = "live" +} diff --git a/infrastructure/stacks/triage/variables.tf b/infrastructure/stacks/triage/variables.tf new file mode 100644 index 00000000..f6541813 --- /dev/null +++ b/infrastructure/stacks/triage/variables.tf @@ -0,0 +1,140 @@ +# variable "project_name" { +# description = "Project name prefix" +# type = string +# } + +# DynamoDB vars +# variable "triage_table_name" { +# type = string +# } + +# variable "billing" { +# type = string +# default = "PAY_PER_REQUEST" +# } + +# IAM vars +# variable "name" { +# type = string +# } + +# variable "table_arns" { +# description = "DynamoDB table ARNs" +# type = list(string) +# } + +# Lambda vars +# variable "function_name" { +# type = string +# } + +# variable "handler" { +# type = string +# } + +variable "runtime" { + type = string +} + +variable "s3_key" { + type = string +} + +# variable "lambda_role" { +# type = string +# } + +variable "mem_size" { + type = number +} + +# variable "body_map_table" { +# type = string +# default = null +# } + +# variable "starting_node_table" { +# type = string +# default = null +# } + +# variable "triage_node_table" { +# type = string +# default = null +# } + +# variable "policy_jsons" { +# description = "List of JSON policies for Lambda" +# type = list(string) +# default = [] +# } + +variable "vpc_private_subnet_cidr_range" { + description = "The CIDR range for the VPC private subnets" + type = string + default = "21" +} + +variable "processor_lambda_name" { + description = "The name of the processor lambda function" +} + +# REST API vars +variable "api_name" { + type = string +} + +variable "type" { + type = string + default = "AWS_PROXY" +} + +variable "int_http_method" { + type = string + default = "POST" +} + +variable "stage_name" { + type = string +} + +variable "authorization" { + type = string +} + +# Trigger vars +# variable "statement_id" { +# type = string +# } + +# variable "lambda_name" { +# type = string +# } + +# variable "principal" { +# type = string +# } + +# variable "bucket_arn" { +# type = string +# default = null +# } + +# variable "lambda_arn" { +# type = string +# } + +# variable "events" { +# type = list(string) +# default = [] +# } + +# variable "filter_prefix" { +# type = string +# default = null +# } + +# variable 
"api_gateway_source_arn" { +# type = string +# default = null +# } diff --git a/infrastructure/terraform_management.tfvars b/infrastructure/terraform_management.tfvars new file mode 100644 index 00000000..1bc18b6e --- /dev/null +++ b/infrastructure/terraform_management.tfvars @@ -0,0 +1,2 @@ +s3_versioning = true +s3_logging_bucket_versioning = false diff --git a/infrastructure/triage.tfvars b/infrastructure/triage.tfvars new file mode 100644 index 00000000..511daef8 --- /dev/null +++ b/infrastructure/triage.tfvars @@ -0,0 +1,11 @@ +#Lambda +mem_size = 1024 +runtime = "python3.13" +s3_key = "lambda_function.zip" +processor_lambda_name = "pathways-lambda-processor" + +#Rest API +stage_name = "beta" +api_name = "triage_api" +authorization = "NONE" + diff --git a/scripts/config/pre-commit.yaml b/scripts/config/pre-commit.yaml index 642dcdcd..ea222de6 100644 --- a/scripts/config/pre-commit.yaml +++ b/scripts/config/pre-commit.yaml @@ -1,4 +1,11 @@ repos: +- repo: local + hooks: + - id: check-branch-name + name: Check branch name + entry: ./scripts/githooks/check-branch-name.sh + language: script + pass_filenames: false - repo: local hooks: - id: scan-secrets @@ -36,3 +43,11 @@ repos: name: Lint python entry: make test-lint language: system +- repo: local + hooks: + - id: check-english-usage + name: Check English usage + entry: ./scripts/githooks/check-english-usage.sh + args: ["check=staged-changes"] + language: script + pass_filenames: false diff --git a/scripts/config/vale/styles/config/vocabularies/words/accept.txt b/scripts/config/vale/styles/config/vocabularies/words/accept.txt index 5308c369..0e1f8499 100644 --- a/scripts/config/vale/styles/config/vocabularies/words/accept.txt +++ b/scripts/config/vale/styles/config/vocabularies/words/accept.txt @@ -33,3 +33,4 @@ Prepper dev Populator (?i)rollout +JMeter diff --git a/scripts/config/vale/vale.ini b/scripts/config/vale/vale.ini index 57ae0bb7..b75ed223 100644 --- a/scripts/config/vale/vale.ini +++ b/scripts/config/vale/vale.ini @@ -8,4 +8,4 @@ Vocab = words BasedOnStyles = Vale [architecture/diagrams/**.md] -BasedOnStyles = \ No newline at end of file +BasedOnStyles = diff --git a/scripts/docker/Dockerfile.metadata b/scripts/docker/Dockerfile.metadata index f54092e8..e551e5b2 100644 --- a/scripts/docker/Dockerfile.metadata +++ b/scripts/docker/Dockerfile.metadata @@ -1,4 +1,3 @@ - # === Metadata ================================================================= ARG IMAGE @@ -20,3 +19,11 @@ LABEL \ org.opencontainers.image.revision=$GIT_COMMIT_HASH \ org.opencontainers.image.created=$BUILD_DATE \ org.opencontainers.image.version=$BUILD_VERSION + +# Create a non-root user and switch to it +RUN addgroup --system appgroup && adduser --system --ingroup appgroup appuser +USER appuser + +# Add appropriate healthcheck (below is placeholder checking Python is available) +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD python --version || exit 1 diff --git a/scripts/docker/dgoss.sh b/scripts/docker/dgoss.sh new file mode 100644 index 00000000..e573a48b --- /dev/null +++ b/scripts/docker/dgoss.sh @@ -0,0 +1,139 @@ +#!/bin/bash +# shellcheck disable=SC2016,SC2154,SC2166 + +# WARNING: Please DO NOT edit this file! It is maintained in the Repository Template (https://github.com/nhs-england-tools/repository-template). Raise a PR instead. 
+ +# SEE: https://github.com/goss-org/goss/blob/master/extras/dgoss/dgoss + +set -e + +USAGE="USAGE: $(basename "$0") [run|edit] " +GOSS_FILES_PATH="${GOSS_FILES_PATH:-.}" + +# Container runtime +CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-docker}" + +info() { + echo -e "INFO: $*" >&2; +} +error() { + echo -e "ERROR: $*" >&2; + exit 1; +} + +cleanup() { + set +e + { kill "$log_pid" && wait "$log_pid"; } 2> /dev/null + if [ -n "$CONTAINER_LOG_OUTPUT" ]; then + cp "$tmp_dir/docker_output.log" "$CONTAINER_LOG_OUTPUT" + fi + rm -rf "$tmp_dir" + if [[ $id ]];then + info "Deleting container" + $CONTAINER_RUNTIME rm -vf "$id" > /dev/null + fi +} + +run(){ + # Copy in goss + cp "${GOSS_PATH}" "$tmp_dir/goss" + chmod 755 "$tmp_dir/goss" + [[ -e "${GOSS_FILES_PATH}/${GOSS_FILE:-goss.yaml}" ]] && cp "${GOSS_FILES_PATH}/${GOSS_FILE:-goss.yaml}" "$tmp_dir/goss.yaml" && chmod 644 "$tmp_dir/goss.yaml" + [[ -e "${GOSS_FILES_PATH}/goss_wait.yaml" ]] && cp "${GOSS_FILES_PATH}/goss_wait.yaml" "$tmp_dir" && chmod 644 "$tmp_dir/goss_wait.yaml" + [[ -n "${GOSS_VARS}" ]] && [[ -e "${GOSS_FILES_PATH}/${GOSS_VARS}" ]] && cp "${GOSS_FILES_PATH}/${GOSS_VARS}" "$tmp_dir" && chmod 644 "$tmp_dir/${GOSS_VARS}" + + # Switch between mount or cp files strategy + GOSS_FILES_STRATEGY=${GOSS_FILES_STRATEGY:="mount"} + case "$GOSS_FILES_STRATEGY" in + mount) + info "Starting $CONTAINER_RUNTIME container" + if [ "$CONTAINER_RUNTIME" == "podman" -a $# == 2 ]; then + id=$($CONTAINER_RUNTIME run -d -v "$tmp_dir:/goss:z" "${@:2}" sleep infinity) + else + id=$($CONTAINER_RUNTIME run -d -v "$tmp_dir:/goss:z" "${@:2}") + fi + ;; + cp) + info "Creating $CONTAINER_RUNTIME container" + id=$($CONTAINER_RUNTIME create "${@:2}") + info "Copy goss files into container" + $CONTAINER_RUNTIME cp "$tmp_dir/." "$id:/goss" + info "Starting $CONTAINER_RUNTIME container" + $CONTAINER_RUNTIME start "$id" > /dev/null + ;; + *) error "Wrong goss files strategy used! Correct options are \"mount\" or \"cp\"." + esac + + $CONTAINER_RUNTIME logs -f "$id" > "$tmp_dir/docker_output.log" 2>&1 & + log_pid=$! + info "Container ID: ${id:0:8}" +} + +get_docker_file() { + local cid=$1 # Docker container ID + local src=$2 # Source file path (in the container) + local dst=$3 # Destination file path + + if $CONTAINER_RUNTIME exec "${cid}" sh -c "test -e ${src}" > /dev/null; then + mkdir -p "${GOSS_FILES_PATH}" + $CONTAINER_RUNTIME cp "${cid}:${src}" "${dst}" + info "Copied '${src}' from container to '${dst}'" + fi +} + +# Main +tmp_dir=$(mktemp -d /tmp/tmp.XXXXXXXXXX) +chmod 777 "$tmp_dir" +trap 'ret=$?;cleanup;exit $ret' EXIT + +GOSS_PATH="${GOSS_PATH:-$(which goss 2> /dev/null || true)}" +[[ $GOSS_PATH ]] || { error "Couldn't find goss installation, please set GOSS_PATH to it"; } +[[ ${GOSS_OPTS+x} ]] || GOSS_OPTS="--color --format documentation" +[[ ${GOSS_WAIT_OPTS+x} ]] || GOSS_WAIT_OPTS="-r 30s -s 1s > /dev/null" +GOSS_SLEEP=${GOSS_SLEEP:-0.2} + +[[ $CONTAINER_RUNTIME =~ ^(docker|podman)$ ]] || { error "Runtime must be one of docker or podman"; } + +case "$1" in + run) + run "$@" + if [[ -e "${GOSS_FILES_PATH}/goss_wait.yaml" ]]; then + info "Found goss_wait.yaml, waiting for it to pass before running tests" + if [[ -z "${GOSS_VARS}" ]]; then + if ! $CONTAINER_RUNTIME exec "$id" sh -c "/goss/goss -g /goss/goss_wait.yaml validate $GOSS_WAIT_OPTS"; then + $CONTAINER_RUNTIME logs "$id" >&2 + error "goss_wait.yaml never passed" + fi + else + if ! 
$CONTAINER_RUNTIME exec "$id" sh -c "/goss/goss -g /goss/goss_wait.yaml --vars='/goss/${GOSS_VARS}' validate $GOSS_WAIT_OPTS"; then + $CONTAINER_RUNTIME logs "$id" >&2 + error "goss_wait.yaml never passed" + fi + fi + fi + [[ $GOSS_SLEEP ]] && { info "Sleeping for $GOSS_SLEEP"; sleep "$GOSS_SLEEP"; } + info "Container health" + if [ "true" != "$($CONTAINER_RUNTIME inspect -f '{{.State.Running}}' "$id")" ]; then + $CONTAINER_RUNTIME logs "$id" >&2 + error "the container failed to start" + fi + info "Running Tests" + if [[ -z "${GOSS_VARS}" ]]; then + $CONTAINER_RUNTIME exec "$id" sh -c "/goss/goss -g /goss/goss.yaml validate $GOSS_OPTS" + else + $CONTAINER_RUNTIME exec "$id" sh -c "/goss/goss -g /goss/goss.yaml --vars='/goss/${GOSS_VARS}' validate $GOSS_OPTS" + fi + ;; + edit) + run "$@" + info "Run goss add/autoadd to add resources" + $CONTAINER_RUNTIME exec -it "$id" sh -c 'cd /goss; PATH="/goss:$PATH" exec sh' + get_docker_file "$id" "/goss/goss.yaml" "${GOSS_FILES_PATH}/${GOSS_FILE:-goss.yaml}" + get_docker_file "$id" "/goss/goss_wait.yaml" "${GOSS_FILES_PATH}/goss_wait.yaml" + if [[ -n "${GOSS_VARS}" ]]; then + get_docker_file "$id" "/goss/${GOSS_VARS}" "${GOSS_FILES_PATH}/${GOSS_VARS}" + fi + ;; + *) + error "$USAGE" +esac diff --git a/scripts/docker/docker.lib.sh b/scripts/docker/docker.lib.sh old mode 100755 new mode 100644 index 5522a726..309a88c6 --- a/scripts/docker/docker.lib.sh +++ b/scripts/docker/docker.lib.sh @@ -24,13 +24,15 @@ set -euo pipefail function docker-build() { local dir=${dir:-$PWD} + # establish if we are using docker or podman + DOCKER_CMD=$(_set_docker_cmd) version-create-effective-file _create-effective-dockerfile tag=$(_get-effective-tag) - docker build \ + $DOCKER_CMD build \ --progress=plain \ --platform linux/amd64 \ --build-arg IMAGE="${DOCKER_IMAGE}" \ @@ -49,12 +51,10 @@ function docker-build() { # Tag the image with all the stated versions, see the documentation for more details for version in $(_get-all-effective-versions) latest; do - if [[ ! -z "$version" ]]; then - docker tag "${tag}" "${DOCKER_IMAGE}:${version}" + if [ ! -z "$version" ]; then + $DOCKER_CMD tag "${tag}" "${DOCKER_IMAGE}:${version}" fi done - - return } # Create the Dockerfile.effective file to bake in version info @@ -66,8 +66,6 @@ function docker-bake-dockerfile() { version-create-effective-file _create-effective-dockerfile - - return } # Run hadolint over the generated Dockerfile. @@ -76,7 +74,6 @@ function docker-bake-dockerfile() { function docker-lint() { local dir=${dir:-$PWD} file=${dir}/Dockerfile.effective ./scripts/docker/dockerfile-linter.sh - return } # Check test Docker image. @@ -88,15 +85,14 @@ function docker-lint() { function docker-check-test() { local dir=${dir:-$PWD} + DOCKER_CMD=$(_set_docker_cmd) # shellcheck disable=SC2086,SC2154 - docker run --rm --platform linux/amd64 \ + $DOCKER_CMD run --rm --platform linux/amd64 \ ${args:-} \ "${DOCKER_IMAGE}:$(_get-effective-version)" 2>/dev/null \ ${cmd:-} \ | grep -q "${check}" && echo PASS || echo FAIL - - return } # Run Docker image. @@ -107,15 +103,14 @@ function docker-check-test() { function docker-run() { local dir=${dir:-$PWD} + # use a distinct variable name here, so the optional in-container command held in DOCKER_CMD below is not clobbered + container_runtime=$(_set_docker_cmd) local tag=$(dir="$dir" _get-effective-tag) # shellcheck disable=SC2086 - docker run --rm --platform linux/amd64 \ + $container_runtime run --rm --platform linux/amd64 \ ${args:-} \ "${tag}" \ ${DOCKER_CMD:-} - - return } # Push Docker image. 
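A minimal standalone sketch of the docker-or-podman selection these functions now share, assuming only that one of the two runtimes is on the PATH (the image name is illustrative). Note that docker-run keeps the optional in-container command in DOCKER_CMD, which is why the runtime name above lives in its own variable:

#!/bin/bash
# Prefer docker, fall back to podman (mirrors _set_docker_cmd).
container_runtime=$(command -v docker >/dev/null 2>&1 && echo docker || echo podman)
# DOCKER_CMD stays free to carry the optional command run inside the container.
"$container_runtime" run --rm alpine:3 ${DOCKER_CMD:-echo "using $container_runtime"}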
@@ -124,13 +119,12 @@ function docker-run() { function docker-push() { local dir=${dir:-$PWD} + DOCKER_CMD=$(_set_docker_cmd) # Push all the image tags based on the stated versions, see the documentation for more details for version in $(dir="$dir" _get-all-effective-versions) latest; do - docker push "${DOCKER_IMAGE}:${version}" + $DOCKER_CMD push "${DOCKER_IMAGE}:${version}" done - - return } # Remove Docker resources. @@ -139,16 +133,15 @@ function docker-push() { function docker-clean() { local dir=${dir:-$PWD} + DOCKER_CMD=$(_set_docker_cmd) for version in $(dir="$dir" _get-all-effective-versions) latest; do - docker rmi "${DOCKER_IMAGE}:${version}" > /dev/null 2>&1 ||: + $DOCKER_CMD rmi "${DOCKER_IMAGE}:${version}" > /dev/null 2>&1 ||: done rm -f \ .version \ Dockerfile.effective \ Dockerfile.effective.dockerignore - - return } # Create effective version from the VERSION file. @@ -161,7 +154,7 @@ function version-create-effective-file() { local version_file="$dir/VERSION" local build_datetime=${BUILD_DATETIME:-$(date -u +'%Y-%m-%dT%H:%M:%S%z')} - if [[ -f "$version_file" ]]; then + if [ -f "$version_file" ]; then # shellcheck disable=SC2002 cat "$version_file" | \ sed "s/\(\${yyyy}\|\$yyyy\)/$(date --date="${build_datetime}" -u +"%Y")/g" | \ @@ -173,8 +166,6 @@ function version-create-effective-file() { sed "s/\(\${hash}\|\$hash\)/$(git rev-parse --short HEAD)/g" \ > "$dir/.version" fi - - return } # ============================================================================== @@ -203,36 +194,48 @@ function docker-get-image-version-and-pull() { # match it by name and version regex, if given. local versions_file="${TOOL_VERSIONS:=$(git rev-parse --show-toplevel)/.tool-versions}" local version="latest" - if [[ -f "$versions_file" ]]; then + if [ -f "$versions_file" ]; then line=$(grep "docker/${name} " "$versions_file" | sed "s/^#\s*//; s/\s*#.*$//" | grep "${match_version:-".*"}") - [[ -n "$line" ]] && version=$(echo "$line" | awk '{print $2}') + [ -n "$line" ] && version=$(echo "$line" | awk '{print $2}') fi # Split the image version into two, tag name and digest sha256. local tag="$(echo "$version" | sed 's/@.*$//')" local digest="$(echo "$version" | sed 's/^.*@//')" + DOCKER_CMD=$(_set_docker_cmd) + + local platform="${DOCKER_PLATFORM:-}" + if [ -z "$platform" ]; then + case "$(uname -m)" in + x86_64|amd64) platform="linux/amd64" ;; + aarch64|arm64) platform="linux/arm64" ;; + *) platform="" ;; + esac + fi + local platform_args=() + if [ -n "$platform" ]; then + platform_args=(--platform "$platform") + fi # Check if the image exists locally already - if ! docker images | awk '{ print $1 ":" $2 }' | grep -q "^${name}:${tag}$"; then - if [[ "$digest" != "latest" ]]; then + if ! $DOCKER_CMD images | awk '{ print $1 ":" $2 }' | grep -q "^${name}:${tag}$"; then + if [ "$digest" != "latest" ]; then # Pull image by the digest sha256 and tag it - docker pull \ - --platform linux/amd64 \ + $DOCKER_CMD pull \ + "${platform_args[@]}" \ "${name}@${digest}" \ > /dev/null 2>&1 || true - docker tag "${name}@${digest}" "${name}:${tag}" + $DOCKER_CMD tag "${name}@${digest}" "${name}:${tag}" else # Pull the latest image - docker pull \ - --platform linux/amd64 \ + $DOCKER_CMD pull \ + "${platform_args[@]}" \ "${name}:latest" \ > /dev/null 2>&1 || true fi fi echo "${name}:${version}" - - return } # ============================================================================== @@ -249,14 +252,12 @@ function _create-effective-dockerfile() { # Dockerfile.effective file, otherwise docker won't use it. 
# See https://docs.docker.com/build/building/context/#filename-and-location # If using podman, this requires v5.0.0 or later. - if [[ -f "${dir}/Dockerfile.dockerignore" ]]; then + if [ -f "${dir}/Dockerfile.dockerignore" ]; then cp "${dir}/Dockerfile.dockerignore" "${dir}/Dockerfile.effective.dockerignore" fi cp "${dir}/Dockerfile" "${dir}/Dockerfile.effective" _replace-image-latest-by-specific-version _append-metadata - - return } # Replace image:latest by a specific version. @@ -269,7 +270,7 @@ function _replace-image-latest-by-specific-version() { local dockerfile="${dir}/Dockerfile.effective" local build_datetime=${BUILD_DATETIME:-$(date -u +'%Y-%m-%dT%H:%M:%S%z')} - if [[ -f "$versions_file" ]]; then + if [ -f "$versions_file" ]; then # First, list the entries specific for Docker to take precedence, then the rest but exclude comments content=$(grep " docker/" "$versions_file"; grep -v " docker/" "$versions_file" ||: | grep -v "^#") echo "$content" | while IFS= read -r line; do @@ -281,7 +282,7 @@ function _replace-image-latest-by-specific-version() { done fi - if [[ -f "$dockerfile" ]]; then + if [ -f "$dockerfile" ]; then # shellcheck disable=SC2002 cat "$dockerfile" | \ sed "s/\(\${yyyy}\|\$yyyy\)/$(date --date="${build_datetime}" -u +"%Y")/g" | \ @@ -297,8 +298,6 @@ function _replace-image-latest-by-specific-version() { # Do not ignore the issue if 'latest' is used in the effective image sed -Ei "/# hadolint ignore=DL3007$/d" "${dir}/Dockerfile.effective" - - return } # Append metadata to the end of Dockerfile. @@ -313,8 +312,6 @@ function _append-metadata() { "$(git rev-parse --show-toplevel)/scripts/docker/Dockerfile.metadata" \ > "$dir/Dockerfile.effective.tmp" mv "$dir/Dockerfile.effective.tmp" "$dir/Dockerfile.effective" - - return } # Print top Docker image version. @@ -325,8 +322,6 @@ function _get-effective-version() { local dir=${dir:-$PWD} head -n 1 "${dir}/.version" 2> /dev/null ||: - - return } # Print the effective tag for the image with the version. If you don't have a VERSION file @@ -337,12 +332,10 @@ function _get-effective-tag() { local tag=$DOCKER_IMAGE version=$(_get-effective-version) - if [[ ! -z "$version" ]]; then + if [ ! -z "$version" ]; then tag="${tag}:${version}" fi echo "$tag" - - return } # Print all Docker image versions. @@ -353,8 +346,6 @@ function _get-all-effective-versions() { local dir=${dir:-$PWD} cat "${dir}/.version" 2> /dev/null ||: - - return } # Print Git branch name. 
Check the GitHub variables first and then the local Git @@ -363,14 +354,22 @@ function _get-git-branch-name() { local branch_name=$(git rev-parse --abbrev-ref HEAD) - if [[ -n "${GITHUB_HEAD_REF:-}" ]]; then + if [ -n "${GITHUB_HEAD_REF:-}" ]; then branch_name=$GITHUB_HEAD_REF - elif [[ -n "${GITHUB_REF:-}" ]]; then + elif [ -n "${GITHUB_REF:-}" ]; then # shellcheck disable=SC2001 branch_name=$(echo "$GITHUB_REF" | sed "s#refs/heads/##") fi echo "$branch_name" +} + +function get-docker-version() { + DOCKER_CMD=$(_set_docker_cmd) + $DOCKER_CMD -v +} - return +function _set_docker_cmd() { + DOCKER_CMD=$(command -v docker >/dev/null 2>&1 && echo docker || echo podman) + echo "$DOCKER_CMD" } diff --git a/scripts/docker/docker.mk b/scripts/docker/docker.mk index 16104c55..afa8bca5 100644 --- a/scripts/docker/docker.mk +++ b/scripts/docker/docker.mk @@ -35,10 +35,9 @@ clean:: # Remove Docker resources (docker) - optional: docker_dir|dir=[path to t _docker: # Docker command wrapper - mandatory: cmd=[command to execute]; optional: dir=[path to the image directory where the Dockerfile is located, relative to the project's top-level directory, default is '.'] # 'DOCKER_IMAGE' and 'DOCKER_TITLE' are passed to the functions as environment variables - # dir=$(realpath $(or ${dir}, infrastructure/images/${DOCKER_IMAGE})) - # source scripts/docker/docker.lib.sh - # docker-${cmd} # 'dir' is accessible by the function as environment variable - echo Docker images not used yet! + dir=$(realpath $(or ${dir}, infrastructure/images/${DOCKER_IMAGE})) + source scripts/docker/docker.lib.sh + docker-${cmd} # 'dir' is accessible by the function as environment variable # ============================================================================== # Quality checks - please DO NOT edit this section! diff --git a/scripts/docker/dockerfile-linter.sh b/scripts/docker/dockerfile-linter.sh index 061212ec..02ff6acf 100755 --- a/scripts/docker/dockerfile-linter.sh +++ b/scripts/docker/dockerfile-linter.sh @@ -27,8 +27,6 @@ function main() { else file="$file" run-hadolint-in-docker fi - - return } # Run hadolint natively. @@ -38,7 +36,6 @@ function run-hadolint-natively() { # shellcheck disable=SC2001 hadolint "$(echo "$file" | sed "s#$PWD#.#")" - return } # Run hadolint in a Docker container. 
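A sketch of driving the re-enabled _docker wrapper above; the image name and directory are hypothetical, and cmd selects which docker-* function from docker.lib.sh runs:

make _docker cmd=build DOCKER_IMAGE=triage-api dir=infrastructure/images/triage-api
# which the wrapper executes, roughly, as:
#   dir=$(realpath infrastructure/images/triage-api); source scripts/docker/docker.lib.sh; docker-build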
@@ -48,19 +45,18 @@ # shellcheck disable=SC1091 source ./scripts/docker/docker.lib.sh - + DOCKER_CMD=$(_set_docker_cmd) + echo run in "$DOCKER_CMD" # shellcheck disable=SC2155 local image=$(name=hadolint/hadolint docker-get-image-version-and-pull) # shellcheck disable=SC2001 - docker run --rm --platform linux/amd64 \ + $DOCKER_CMD run --rm --platform linux/amd64 \ --volume "$PWD:/workdir" \ --workdir /workdir \ "$image" \ hadolint \ --config /workdir/scripts/config/hadolint.yaml \ "/workdir/$(echo "$file" | sed "s#$PWD#.#")" - - return } # ============================================================================== diff --git a/scripts/docker/tests/Dockerfile b/scripts/docker/tests/Dockerfile deleted file mode 100644 index b5ea5606..00000000 --- a/scripts/docker/tests/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -# `*:latest` will be replaced with a corresponding version stored in the '.tool-versions' file -# hadolint ignore=DL3007 -FROM python:latest diff --git a/scripts/docker/tests/docker.test.sh b/scripts/docker/tests/docker.test.sh index 1092179d..8f487b8f 100755 --- a/scripts/docker/tests/docker.test.sh +++ b/scripts/docker/tests/docker.test.sh @@ -44,17 +44,19 @@ function main() { done echo "Total: ${#tests[@]}, Passed: $(( ${#tests[@]} - status )), Failed: $status" test-docker-suite-teardown - [[ $status -gt 0 ]] && return 1 || return 0 + [ $status -gt 0 ] && return 1 || return 0 } # ============================================================================== function test-docker-suite-setup() { - return + + : } function test-docker-suite-teardown() { - return + + : } # ============================================================================== @@ -104,8 +106,6 @@ function test-docker-test() { output=$(docker-check-test) # Assert echo "$output" | grep -q "PASS" - - return } function test-docker-run() { @@ -116,8 +116,6 @@ output=$(docker-run) # Assert echo "$output" | grep -Eq "Python [0-9]+\.[0-9]+\.[0-9]+" - - return } function test-docker-clean() { @@ -142,8 +140,6 @@ function test-docker-get-image-version-and-pull() { --filter=reference="$name" \ --format "{{.Tag}}" \ | grep -vq "" - - return } # ============================================================================== diff --git a/scripts/githooks/check-branch-name.sh b/scripts/githooks/check-branch-name.sh new file mode 100755 index 00000000..fffd16ec --- /dev/null +++ b/scripts/githooks/check-branch-name.sh @@ -0,0 +1,27 @@ +#!/bin/bash +set -e + +exit_code=0 + +function check_git_branch_name { + BRANCH=$(echo "$1" | tr '[:upper:]' '[:lower:]') + VALID_FORMAT=$(check_git_branch_name_format "$BRANCH") + if [[ ! -z "$VALID_FORMAT" ]] ; then + echo "Branch name $1 does not match the naming pattern" + echo "Naming pattern: task/ or hotfix/ followed by a hyphenated Jira ref (saet or npt), then an underscore or hyphen, then 10 to 46 alphanumerics, hyphens or underscores starting with an alphanumeric" + return 1 + fi +} + +function check_git_branch_name_format { + BUILD_BRANCH="$1" + if [ "$BUILD_BRANCH" != 'main' ] && ! [[ $BUILD_BRANCH =~ (hotfix|task)\/(saet|npt)-([0-9]{1,5})(_|-)([A-Za-z0-9])([A-Za-z0-9_-]{9,45})$ ]] ; then + echo 1 + fi +} + +BRANCH_NAME=${BRANCH_NAME:-$(git rev-parse --abbrev-ref HEAD)} + +# With set -e an unguarded failing call would exit before $? could be checked, so guard the call directly +if ! check_git_branch_name "$BRANCH_NAME" ; then 
exit_code=1 ; fi +exit $exit_code diff --git a/scripts/githooks/check-commit-message.sh b/scripts/githooks/check-commit-message.sh new file mode 100755 index 00000000..994b3a3d --- /dev/null +++ b/scripts/githooks/check-commit-message.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +exit_code=0 +GIT_COMMIT_MESSAGE_MAX_LENGTH=100 + +function check_jira_ref { + BRANCH_NAME=$1 + COMMIT_MESSAGE=$2 + + HYPHENATED_BRANCH_NAME="${BRANCH_NAME//_/-}" + IFS='/' read -r -a name_array <<< "$HYPHENATED_BRANCH_NAME" + IFS='-' read -r -a ref <<< "${name_array[1]}" + JIRA_REF=$(echo "${ref[0]}"-"${ref[1]}" | tr '[:lower:]' '[:upper:]') + + # Add Jira ref after the colon, if missing + if [[ "$COMMIT_MESSAGE" =~ ^(feat|fix|chore|docs|style|refactor|perf|test|ci|build|revert)(\([a-z0-9_-]+\))?:[[:space:]] ]] && + [[ "$JIRA_REF" =~ ^(SAET)-[0-9]+$ ]] && + [[ "$COMMIT_MESSAGE" != *"$JIRA_REF"* ]]; then + COMMIT_MESSAGE=$(echo "$COMMIT_MESSAGE" | sed -E "s/^((feat|fix|chore|docs|style|refactor|perf|test|ci|build|revert)(\([a-z0-9_-]+\))?:)/\1 $JIRA_REF/") + fi + + echo "$COMMIT_MESSAGE" +} + +function check_commit_message_format { + COMMIT_MESSAGE="$1" + local REGEX='^(feat|fix|chore|docs|style|refactor|perf|test|ci|build|revert)(\([a-z0-9_-]+\))?: (SAET)-[0-9]+ .+' + + if ! [[ $COMMIT_MESSAGE =~ $REGEX ]]; then + echo -e "\033[0;31mInvalid conventional commit message format! Expected: <type>(<scope>): <JIRA-REF> <subject>\033[0m" + return 1 + fi +} + +function check_commit_message_length { + COMMIT_MESSAGE="$1" + COMMIT_MESSAGE_LENGTH="$(echo "$COMMIT_MESSAGE" | sed s/\'//g | head -1 | wc -m)" + + if [[ "$COMMIT_MESSAGE_LENGTH" -gt $GIT_COMMIT_MESSAGE_MAX_LENGTH ]] ; then + echo "At $COMMIT_MESSAGE_LENGTH characters the commit message exceeds limit of $GIT_COMMIT_MESSAGE_MAX_LENGTH" + fi +} + +function check_git_commit_message { + COMMIT_MESSAGE=$1 + + VALID_FORMAT=$(check_commit_message_format "$COMMIT_MESSAGE") + VALID_LENGTH=$(check_commit_message_length "$COMMIT_MESSAGE") + + if [[ ! -z "$VALID_LENGTH" || ! -z "$VALID_FORMAT" ]] ; then + [[ ! -z "$VALID_FORMAT" ]] && echo "$VALID_FORMAT" + [[ ! -z "$VALID_LENGTH" ]] && echo "$VALID_LENGTH" + return 1 + fi +} + +# ---- MAIN EXECUTION ---- +ORIGINAL_COMMIT_MESSAGE=${COMMIT_MESSAGE:-"$(cat "$1")"} +BRANCH_NAME=${BRANCH_NAME:-$(git rev-parse --abbrev-ref HEAD)} +COMMIT_MESSAGE=$(check_jira_ref "$BRANCH_NAME" "$ORIGINAL_COMMIT_MESSAGE") + +# Update commit message file +sed -i -e "s/$ORIGINAL_COMMIT_MESSAGE/$COMMIT_MESSAGE/g" "$1" + +check_git_commit_message "$(cat "$1")" +exit_code=$? +exit $exit_code diff --git a/scripts/githooks/check-english-usage.sh b/scripts/githooks/check-english-usage.sh index 1c0c2064..10b9d659 100755 --- a/scripts/githooks/check-english-usage.sh +++ b/scripts/githooks/check-english-usage.sh @@ -1,4 +1,3 @@ - #!/bin/bash # WARNING: Please, DO NOT edit this file! It is maintained in the Repository Template (https://github.com/nhs-england-tools/repository-template). Raise a PR instead. 
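The hunks below replace unquoted $($filter) expansion with an array so filenames containing spaces survive word splitting; a standalone sketch of the same pattern, assuming $filter is a git command that prints one path per line:

#!/bin/bash
filter="git diff --name-only HEAD"   # hypothetical filter command
files=()
while IFS= read -r file; do
  files+=("$file")
done < <($filter)
# Each array element reaches vale as a single argument, spaces intact.
[ ${#files[@]} -gt 0 ] && vale --config "$PWD/scripts/config/vale/vale.ini" "${files[@]}"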
@@ -62,10 +61,19 @@ function main() { # filter=[git command to filter the files to check] function run-vale-natively() { - # shellcheck disable=SC2046 + # Read files into an array to handle spaces in filenames + files=() + while IFS= read -r file; do + files+=("$file") + done < <($filter) + if [ ${#files[@]} -eq 0 ]; then + echo "No files to check" + return 0 + fi + vale \ --config "$PWD/scripts/config/vale/vale.ini" \ - $($filter) + "${files[@]}" return } @@ -82,17 +90,28 @@ function run-vale-in-docker() { echo run in "$DOCKER_CMD" # shellcheck disable=SC2155 local image=$(name=jdkato/vale docker-get-image-version-and-pull) + + # Read files into an array to handle spaces in filenames + files=() + while IFS= read -r file; do + files+=("$file") + done < <($filter) + # We use /dev/null here to stop `vale` from complaining that it's # not been called correctly if the $filter happens to return an # empty list. As long as there's a filename, even if it's one that # will be ignored, `vale` is happy. - # shellcheck disable=SC2046,SC2086 + if [ ${#files[@]} -eq 0 ]; then + files=(/dev/null) + fi + + # shellcheck disable=SC2086 $DOCKER_CMD run --rm --platform linux/amd64 \ --volume "$PWD:/workdir" \ --workdir /workdir \ "$image" \ --config /workdir/scripts/config/vale/vale.ini \ - $($filter) /dev/null + "${files[@]}" return } diff --git a/scripts/githooks/commit-msg b/scripts/githooks/commit-msg new file mode 100755 index 00000000..64c095a0 --- /dev/null +++ b/scripts/githooks/commit-msg @@ -0,0 +1,12 @@ +#!/bin/sh +# +# Script to check the commit log message contains jira reference +# at start and inserts if not +# Called by "git commit" with one argument, the name of the file +# that has the commit message. The hook should exit with non-zero +# status after issuing an appropriate message if it wants to stop the +# commit. 
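+# For example (a sketch of check-commit-message.sh behaviour; branch and message are hypothetical): +#   branch  task/saet-123_add_endpoint  derives Jira ref "SAET-123" +#   message "fix: correct the timeout"  is rewritten to "fix: SAET-123 correct the timeout" +# before being validated for conventional-commit format and length.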
+ +# This script is copied to the .git/hooks directory by the make config target + +scripts/githooks/check-commit-message.sh $* diff --git a/scripts/terraform/terraform.mk b/scripts/terraform/terraform.mk index c1112c91..7e245acf 100644 --- a/scripts/terraform/terraform.mk +++ b/scripts/terraform/terraform.mk @@ -7,6 +7,12 @@ TF_ENV ?= dev STACK ?= ${stack} TERRAFORM_STACK ?= $(or ${STACK}, infrastructure/environments/${TF_ENV}) +# If STACK is set and doesn't contain a path separator, assume it's a stack name under infrastructure/stacks/ +ifneq ($(findstring /,$(STACK)),/) + ifneq ($(STACK),) + TERRAFORM_STACK := infrastructure/stacks/${STACK} + endif +endif dir ?= ${TERRAFORM_STACK} terraform-init: # Initialise Terraform - optional: terraform_dir|dir=[path to a directory where the command will be executed, relative to the project's top-level directory, default is one of the module variables or the example directory, if not set], terraform_opts|opts=[options to pass to the Terraform init command, default is none/empty] @Development diff --git a/scripts/terraform/terraform.sh b/scripts/terraform/terraform.sh index f388c9a9..6e0a64f3 100755 --- a/scripts/terraform/terraform.sh +++ b/scripts/terraform/terraform.sh @@ -49,10 +49,23 @@ function run-terraform-in-docker() { # shellcheck disable=SC1091 source ./scripts/docker/docker.lib.sh + local platform="${DOCKER_PLATFORM:-}" + if [ -z "$platform" ]; then + case "$(uname -m)" in + x86_64|amd64) platform="linux/amd64" ;; + aarch64|arm64) platform="linux/arm64" ;; + *) platform="" ;; + esac + fi + local platform_args=() + if [ -n "$platform" ]; then + platform_args=(--platform "$platform") + fi + # shellcheck disable=SC2155 local image=$(name=hashicorp/terraform docker-get-image-version-and-pull) # shellcheck disable=SC2086 - docker run --rm --platform linux/amd64 \ + docker run --rm "${platform_args[@]}" \ --volume "$PWD":/workdir \ --workdir /workdir \ "$image" \ diff --git a/scripts/tests/test.mk b/scripts/tests/test.mk index 42c6593a..4fd72b06 100644 --- a/scripts/tests/test.mk +++ b/scripts/tests/test.mk @@ -76,12 +76,12 @@ test: # Run all the test tasks @Testing test-load _test: - set -e - script="./scripts/tests/${name}.sh" - if [ -e "$${script}" ]; then - exec $${script} - else - echo "make test-${name} not implemented: $${script} not found" >&2 + set -e; \ + script="./scripts/tests/${name}.sh"; \ + if [ -e "$${script}" ]; then \ + exec $${script}; \ + else \ + echo "make test-${name} not implemented: $${script} not found" >&2; \ fi ${VERBOSE}.SILENT: \ diff --git a/scripts/workflow/action-infra-stack.sh b/scripts/workflow/action-infra-stack.sh index 73f9bd89..e2162a1e 100644 --- a/scripts/workflow/action-infra-stack.sh +++ b/scripts/workflow/action-infra-stack.sh @@ -51,17 +51,17 @@ export TF_VAR_terraform_lock_table_name="nhse-$ENVIRONMENT-$TF_VAR_repo_name-ter # check exports have been done EXPORTS_SET=0 # Check key variables have been exported - see above -if [[ -z "$ACTION" ]] ; then +if [ -z "$ACTION" ] ; then echo Set ACTION to terraform action one of plan, apply, destroy, or validate EXPORTS_SET=1 fi -if [[ -z "$STACK" ]] ; then +if [ -z "$STACK" ] ; then echo Set STACK to name of the stack to be actioned EXPORTS_SET=1 fi -if [[ -z "$ENVIRONMENT" ]] ; then +if [ -z "$ENVIRONMENT" ] ; then echo Set ENVIRONMENT to the environment to action the terraform in - one of dev, test, preprod, prod EXPORTS_SET=1 else @@ -71,7 +71,7 @@ else fi fi -# if [[ -z "$PROJECT" ]] ; then +# if [ -z "$PROJECT" ] ; then # echo Set PROJECT to dos or cm # 
EXPORTS_SET=1 # else @@ -81,17 +81,17 @@ fi # fi # fi -if [[ -z "$WORKSPACE" ]] ; then +if [ -z "$WORKSPACE" ] ; then echo Set WORKSPACE to the workspace to action the terraform in EXPORTS_SET=1 fi -if [[ -z "$TF_VAR_repo_name" ]] ; then +if [ -z "$TF_VAR_repo_name" ] ; then echo Set REPOSITORY to the REPOSITORY to action the terraform in EXPORTS_SET=1 fi -if [[ $EXPORTS_SET = 1 ]] ; then +if [ $EXPORTS_SET = 1 ] ; then echo One or more exports not set exit 1 fi @@ -138,7 +138,7 @@ fi cd "$STACK_DIR" || exit # if no stack-specific tfvars create temporary one TEMP_STACK_TF_VARS_FILE=0 -if [[ ! -f "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" ]] ; then +if [ ! -f "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" ] ; then touch "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" TEMP_STACK_TF_VARS_FILE=1 fi @@ -155,7 +155,7 @@ fi # if no env specific tfvars for stack create temporary one TEMP_ENV_STACK_TF_VARS_FILE=0 -if [[ ! -f "$ENVIRONMENTS_DIR/$STACK_TF_VARS_FILE" ]] ; then +if [ ! -f "$ENVIRONMENTS_DIR/$STACK_TF_VARS_FILE" ] ; then touch "$ENVIRONMENTS_DIR/$STACK_TF_VARS_FILE" TEMP_ENV_STACK_TF_VARS_FILE=1 fi @@ -164,8 +164,17 @@ terraform-initialise terraform workspace select -or-create "$WORKSPACE" +if [ "$STACK" = "artefact_management" ] ; then + echo "Exporting account ID for artefact_management stack" + if [ -z "$AWS_ACCOUNT_ID_DEV" ] ; then + echo "AWS_ACCOUNT_ID_DEV environment variable must be set for artefact_management stack" + exit 1 + fi + export TF_VAR_aws_account_id_dev=$AWS_ACCOUNT_ID_DEV +fi + # plan -if [[ -n "$ACTION" ] && [ "$ACTION" = 'plan' ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'plan' ] ; then terraform plan -out $STACK.tfplan \ -var-file "$ROOT_DIR/$INFRASTRUCTURE_DIR/$COMMON_TF_VARS_FILE" \ -var-file "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" \ @@ -174,7 +183,7 @@ if [[ -n "$ACTION" ] && [ "$ACTION" = 'plan' ]] ; then PLAN_RESULT=$(terraform show -no-color $STACK.tfplan) - if [[ -n "$GITHUB_WORKSPACE" ]] ; then + if [ -n "$GITHUB_WORKSPACE" ] ; then cp "$STACK.tfplan" "$GITHUB_WORKSPACE/$STACK.tfplan" # Look for the "No changes" string in the output for GitHub workflow. 
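# An example invocation of this script (all values illustrative); AWS_ACCOUNT_ID_DEV is only required when STACK=artefact_management, per the guard above: #   ACTION=plan STACK=artefact_management ENVIRONMENT=dev WORKSPACE=saet-123 REPOSITORY=my-repo AWS_ACCOUNT_ID_DEV=123456789012 ./scripts/workflow/action-infra-stack.sh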
@@ -188,15 +197,15 @@ fi fi -if [[ -n "$ACTION" ] && [ "$ACTION" = 'apply' ]] ; then - if [[ -n "$GITHUB_WORKSPACE" ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'apply' ] ; then + if [ -n "$GITHUB_WORKSPACE" ] ; then terraform apply -auto-approve "$GITHUB_WORKSPACE/$STACK.tfplan" else terraform apply -auto-approve "$STACK.tfplan" fi fi -if [[ -n "$ACTION" ] && [ "$ACTION" = 'destroy' ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'destroy' ] ; then terraform destroy -auto-approve \ -var-file "$ROOT_DIR/$INFRASTRUCTURE_DIR/$COMMON_TF_VARS_FILE" \ -var-file "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" \ @@ -204,7 +213,7 @@ -var-file "$ENVIRONMENTS_DIR/$STACK_TF_VARS_FILE" fi -if [[ -n "$ACTION" ] && [ "$ACTION" = 'validate' ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'validate' ] ; then terraform validate fi @@ -214,11 +223,11 @@ rm -f "$STACK_DIR"/provider.tf rm -f "$STACK_DIR"/versions.tf rm -f "$STACK_DIR"/common-variables.tf -if [[ $TEMP_STACK_TF_VARS_FILE = 1 ]] ; then +if [ $TEMP_STACK_TF_VARS_FILE = 1 ] ; then rm -f "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" fi -if [[ $TEMP_ENV_STACK_TF_VARS_FILE = 1 ]] ; then +if [ $TEMP_ENV_STACK_TF_VARS_FILE = 1 ] ; then rm -f "$ENVIRONMENTS_DIR/$STACK_TF_VARS_FILE" fi diff --git a/scripts/workflow/boostrapper.sh b/scripts/workflow/boostrapper.sh index b4b4271b..4afa72d3 100755 --- a/scripts/workflow/boostrapper.sh +++ b/scripts/workflow/boostrapper.sh @@ -10,7 +10,7 @@ set -e # - They are NOT set in this script to avoid details being stored in repo export ACTION="${ACTION:-"apply"}" # default action is apply export AWS_REGION="${AWS_REGION:-"eu-west-2"}" # The AWS region into which you intend to deploy the application (where the terraform bucket will be created) eg eu-west-2 -export ENVIRONMENT="${ENVIRONMENT:-"dev"}" # Identify the environment (one of dev,test,security,preprod or prod) usually part of the account name +export ENVIRONMENT="${ENVIRONMENT:-"mgmt"}" # Identify the environment (one of mgmt,dev,test,security,preprod or prod) usually part of the account name export PROJECT="${PROJECT:-"saet"}" export TF_VAR_repo_name="${REPOSITORY:-"$(basename -s .git "$(git config --get remote.origin.url)")"}" export TF_VAR_terraform_state_bucket_name="nhse-$ENVIRONMENT-$TF_VAR_repo_name-terraform-state" # globally unique name @@ -123,6 +123,14 @@ function github_runner_stack { # now do account_wide stack for github runner and for oidc provider + # ------------- Step three create thumbprint for github actions ----------- + export HOST=$(curl https://token.actions.githubusercontent.com/.well-known/openid-configuration) + export CERT_URL=$(jq -r '.jwks_uri | split("/")[2]' <<< $HOST) + export THUMBPRINT=$(echo | openssl s_client -servername "$CERT_URL" -showcerts -connect "$CERT_URL":443 2> /dev/null | tac | sed -n '/-----END CERTIFICATE-----/,/-----BEGIN CERTIFICATE-----/p; /-----BEGIN CERTIFICATE-----/q' | tac | openssl x509 -sha1 -fingerprint -noout | sed 's/://g' | awk -F= '{print tolower($2)}') + # ------------- Step four create oidc identity provider, github runner role and policies for that role ----------- + export TF_VAR_oidc_provider_url="https://token.actions.githubusercontent.com" + export TF_VAR_oidc_thumbprint=$THUMBPRINT + export TF_VAR_oidc_client="sts.amazonaws.com" export STACK=github_runner TF_VAR_stack_name=$(echo "$STACK" | tr '_' '-' ) export TF_VAR_stack_name @@ -162,14 
+170,14 @@ function github_runner_stack { # init terraform terraform-initialise - if [[ -n "$ACTION" ] && [ "$ACTION" = 'plan' ]] ; then + if [ -n "$ACTION" ] && [ "$ACTION" = 'plan' ] ; then terraform plan \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$COMMON_TF_VARS_FILE \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$STACK_TF_VARS_FILE \ -var-file "$ENVIRONMENTS_DIR/$ENV_TF_VARS_FILE" fi - if [[ -n "$ACTION" ] && [ "$ACTION" = 'apply' ]] ; then + if [ -n "$ACTION" ] && [ "$ACTION" = 'apply' ] ; then terraform apply -auto-approve \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$COMMON_TF_VARS_FILE \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$STACK_TF_VARS_FILE \ @@ -180,7 +188,7 @@ function github_runner_stack { rm -f "$STACK_DIR"/locals.tf rm -f "$STACK_DIR"/provider.tf rm -f "$STACK_DIR"/versions.tf - if [[ $TEMP_STACK_TF_VARS_FILE == 1 ]]; then + if [ $TEMP_STACK_TF_VARS_FILE == 1 ]; then rm "$ROOT_DIR/$INFRASTRUCTURE_DIR/$STACK_TF_VARS_FILE" fi @@ -221,19 +229,19 @@ fi # init terraform terraform-initialise -if [[ -n "$ACTION" ] && [ "$ACTION" = 'plan' ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'plan' ] ; then terraform plan \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$COMMON_TF_VARS_FILE \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$STACK_TF_VARS_FILE \ -var-file "$ENVIRONMENTS_DIR/$ENV_TF_VARS_FILE" fi -if [[ -n "$ACTION" ] && [ "$ACTION" = 'apply' ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'apply' ] ; then terraform apply -auto-approve \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$COMMON_TF_VARS_FILE \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$STACK_TF_VARS_FILE \ -var-file "$ENVIRONMENTS_DIR/$ENV_TF_VARS_FILE" fi -if [[ -n "$ACTION" ] && [ "$ACTION" = 'destroy' ]] ; then +if [ -n "$ACTION" ] && [ "$ACTION" = 'destroy' ] ; then terraform destroy -auto-approve \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$COMMON_TF_VARS_FILE \ -var-file "$ROOT_DIR"/"$INFRASTRUCTURE_DIR"/$STACK_TF_VARS_FILE \ diff --git a/scripts/workflow/check-terraform-state.sh b/scripts/workflow/check-terraform-state.sh new file mode 100755 index 00000000..25105174 --- /dev/null +++ b/scripts/workflow/check-terraform-state.sh @@ -0,0 +1,35 @@ +#! /bin/bash + +# fail on first error +set -e +EXPORTS_SET=0 + +# check necessary environment variables are set +if [ -z "$WORKSPACE" ] ; then + echo Set WORKSPACE + EXPORTS_SET=1 +fi + +if [ -z "$ENVIRONMENT" ] ; then + echo Set ENVIRONMENT + EXPORTS_SET=1 +fi + +if [ $EXPORTS_SET = 1 ] ; then + echo One or more exports not set + exit 1 +fi + +export TF_VAR_repo_name="${REPOSITORY:-"$(basename -s .git "$(git config --get remote.origin.url)")"}" +export TERRAFORM_BUCKET_NAME="nhse-$ENVIRONMENT-$TF_VAR_repo_name-terraform-state" + +echo "Checking for terraform workspace --> $WORKSPACE" +echo "Terraform state S3 bucket name being checked --> $TERRAFORM_BUCKET_NAME" + +CLEARED=$(aws s3 ls "s3://$TERRAFORM_BUCKET_NAME/env:/$WORKSPACE/" | awk '{print $2}') +if [ -n "$CLEARED" ] ; then + echo "Not all state cleared - $CLEARED" + exit 1 +else + echo "All state entries for $WORKSPACE removed" +fi diff --git a/scripts/workflow/cleardown-artefacts.sh b/scripts/workflow/cleardown-artefacts.sh new file mode 100755 index 00000000..3b0c3279 --- /dev/null +++ b/scripts/workflow/cleardown-artefacts.sh @@ -0,0 +1,38 @@ +#! 
/bin/bash + +# fail on first error +set -e +EXPORTS_SET=0 + +# check necessary environment variables are set +if [ -z "$WORKSPACE" ] ; then + echo Set WORKSPACE + EXPORTS_SET=1 +fi + +if [ -z "$ARTEFACT_BUCKET_NAME" ] ; then + echo Set ARTEFACT_BUCKET_NAME + EXPORTS_SET=1 +fi + +if [ $EXPORTS_SET = 1 ] ; then + echo One or more exports not set + exit 1 +fi + +if [ "$WORKSPACE" == "default" ] ; then + echo "WORKSPACE cannot be default" + exit 1 +fi + +echo "Clearing down artefacts at or below $ARTEFACT_BUCKET_NAME/$WORKSPACE" + +deletion_output=$(aws s3 rm --recursive "s3://$ARTEFACT_BUCKET_NAME/$WORKSPACE/" 2>&1) + +if [ -n "$deletion_output" ]; then + echo "Successfully deleted the following artefacts from $ARTEFACT_BUCKET_NAME/$WORKSPACE" + echo "$deletion_output" +else + echo "Problem deleting artefacts at $ARTEFACT_BUCKET_NAME/$WORKSPACE. Does the targeted folder exist?" + echo "$deletion_output" +fi diff --git a/scripts/workflow/derive-workspace.sh b/scripts/workflow/derive-workspace.sh new file mode 100755 index 00000000..7ea1ca4a --- /dev/null +++ b/scripts/workflow/derive-workspace.sh @@ -0,0 +1,60 @@ +#! /bin/bash +echo "Trigger: $TRIGGER" +echo "Trigger action: $TRIGGER_ACTION" +echo "Trigger reference: $TRIGGER_REFERENCE" +echo "Trigger head reference: $TRIGGER_HEAD_REFERENCE" +echo "Trigger event reference: $TRIGGER_EVENT_REF" +echo "Commit hash (for dependabot only): $COMMIT_HASH" + +WORKSPACE="Unknown" + +# If we are dealing with a tagging action, then the workspace is the name of the tag +if [ "$TRIGGER" == "tag" ] ; then + WORKSPACE="$TRIGGER_REFERENCE" + echo "Triggered by tagging - deriving workspace directly from tag: $TRIGGER_REFERENCE" + echo "Workspace: $WORKSPACE" + export WORKSPACE + exit +fi + +# If we are dealing with a push action or workflow_dispatch and the trigger is not a tag, we'll need to look at the branch name +# to derive the workspace +if [ "$TRIGGER_ACTION" == "push" ] || [ "$TRIGGER_ACTION" == "workflow_dispatch" ] ; then + echo "Triggered by a push or workflow_dispatch - branch name is current branch" + BRANCH_NAME="${BRANCH_NAME:-$(git rev-parse --abbrev-ref HEAD)}" +# If trigger action is pull_request we will need to derive the workspace from the triggering head reference +elif [ "$TRIGGER_ACTION" == "pull_request" ] ; then + echo "Triggered by a pull_request - setting branch name to trigger head ref" + BRANCH_NAME="$TRIGGER_HEAD_REFERENCE" +# If trigger action is delete (of branch) we will need to derive the workspace from the event ref +elif [ "$TRIGGER_ACTION" == "delete" ] ; then + echo "Triggered by a branch deletion - setting branch name to trigger event ref" + BRANCH_NAME="$TRIGGER_EVENT_REF" +fi + +echo "Branch name: $BRANCH_NAME" +BRANCH_NAME=$(echo "$BRANCH_NAME" | sed 's/refs\/heads\/task/task/g; s/refs\/heads\/dependabot/dependabot/g') + +if [[ "${BRANCH_NAME:0:10}" == "dependabot" ]]; then + # Handle dependabot branches + WORKSPACE="bot-$COMMIT_HASH" + echo "Workspace from dependabot branch: $WORKSPACE" +elif [[ "$BRANCH_NAME" == "main" ]]; then + # Handle main branch + WORKSPACE="default" + echo "Workspace from main branch: $WORKSPACE" +elif [[ "$BRANCH_NAME" == "develop" ]]; then + # Handle develop branch + WORKSPACE="default" + echo "Workspace from develop branch: $WORKSPACE" +else + # Handle task branches + IFS='/' read -r -a name_array <<< "$BRANCH_NAME" + IFS='_-' read -r -a ref <<< "${name_array[1]}" + WORKSPACE=$(echo "${ref[0]}-${ref[1]}" | tr "[:upper:]" "[:lower:]") + echo "Workspace from feature branch: $WORKSPACE" +fi + +echo 
"Branch name: $BRANCH_NAME" +echo "Workspace: $WORKSPACE" +export WORKSPACE diff --git a/scripts/workflow/generate-feature-flags.sh b/scripts/workflow/generate-feature-flags.sh new file mode 100644 index 00000000..161f6d13 --- /dev/null +++ b/scripts/workflow/generate-feature-flags.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +# Script to generate AWS AppConfig Feature Flags JSON from the toggle registry +# This script reads the toggle registry YAML and generates environment-specific feature flags + +# fail on first error +set -e + +# Determine script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)" + +# Default values +TOGGLE_REGISTRY_FILE="${TOGGLE_REGISTRY_FILE:-"$ROOT_DIR/infrastructure/toggles/toggle-registry.yaml"}" +OUTPUT_FILE="${OUTPUT_FILE:-"$ROOT_DIR/infrastructure/toggles/feature-flags.json"}" +CREATED_DATE="${CREATED_DATE:-$(date -u +"%Y-%m-%dT%H:%M:%SZ")}" + +# Validate required environment variables +if [ -z "$ENVIRONMENT" ]; then + echo "Error: ENVIRONMENT environment variable is required" + echo "Usage: ENVIRONMENT=dev $0" + exit 1 +fi + +# Check if toggle registry file exists +if [ ! -f "$TOGGLE_REGISTRY_FILE" ]; then + echo "Error: Toggle registry file not found: $TOGGLE_REGISTRY_FILE" + exit 1 +fi + +echo "======================================" +echo "Generating Feature Flags" +echo "======================================" +echo "Environment: $ENVIRONMENT" +echo "Toggle Registry: $TOGGLE_REGISTRY_FILE" +echo "Output File: $OUTPUT_FILE" +echo "Created Date: $CREATED_DATE" +echo "======================================" + +# Generate the feature flags JSON using Python script +GENERATED_FILE=$(ENVIRONMENT="$ENVIRONMENT" \ + TOGGLE_REGISTRY_FILE="$TOGGLE_REGISTRY_FILE" \ + OUTPUT_FILE="$OUTPUT_FILE" \ + CREATED_DATE="$CREATED_DATE" \ + python3 "$SCRIPT_DIR/generate_feature_flags.py") + +if [ $? 
+ echo "✓ Feature flags generated successfully: $GENERATED_FILE" + + # Display a summary of the generated file + if [ -f "$GENERATED_FILE" ]; then + FLAG_COUNT=$(python3 -c "import json; f=open('$GENERATED_FILE'); data=json.load(f); print(len(data.get('flags', {})))") + echo "✓ Total flags generated: $FLAG_COUNT" + + # Show enabled flags + ENABLED_COUNT=$(python3 -c "import json; f=open('$GENERATED_FILE'); data=json.load(f); print(sum(1 for v in data.get('values', {}).values() if v.get('enabled')))") + echo "✓ Flags enabled in $ENVIRONMENT: $ENABLED_COUNT" + fi +else + echo "✗ Failed to generate feature flags" + exit 1 +fi + +echo "======================================" diff --git a/tests/exploratory/PatientTriageExploratoryTest.jmx b/tests/exploratory/PatientTriageExploratoryTest.jmx index 7103b93d..7d6611f9 100644 --- a/tests/exploratory/PatientTriageExploratoryTest.jmx +++ b/tests/exploratory/PatientTriageExploratoryTest.jmx @@ -193,7 +193,7 @@ vars.put("CONFIG_PARTY", PARTIES[rand.nextInt(PARTIES.length)]); true previousAnswers = vars.get("ANSWERS"); if (previousAnswers == null || previousAnswers.equals("void")) { - previousAnswers = ""; + previousAnswers = ""; } previousAnswers = previousAnswers + ''', {"type": {"coding": [{"code": "%QUESTIONID%"}]},"valueCoding": {"code": "%ANSWERNUM%"}}''' @@ -201,7 +201,7 @@ previousAnswers = previousAnswers + ''', .replaceAll("%ANSWERNUM%", vars.get("answer")); if (previousAnswers[0] == ',') { - previousAnswers = previousAnswers.substring(1); + previousAnswers = previousAnswers.substring(1); } vars.put("ANSWERS", previousAnswers); @@ -238,7 +238,7 @@ vars.put("ANSWERS", previousAnswers); if (!prev.isResponseCodeOK()) { - vars.put("resources_ALL", ""); + vars.put("resources_ALL", ""); } groovy
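A worked sketch of the workspace derivation in scripts/workflow/derive-workspace.sh, with a hypothetical branch name:

TRIGGER=branch TRIGGER_ACTION=push BRANCH_NAME=task/SAET-123_add_endpoint \
  ./scripts/workflow/derive-workspace.sh
# name_array[1] is "SAET-123_add_endpoint"; splitting it on '_' and '-' yields
# (SAET 123 add endpoint), so WORKSPACE becomes "saet-123".
# main and develop map to "default"; dependabot branches map to "bot-<short-hash>".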