Browse Source

feat: re-implement e2e managed tests (#5444)

Signed-off-by: Moritz Johner <beller.moritz@googlemail.com>
Moritz Johner 6 months ago
parent
commit
49debe8ceb
62 changed files with 1069 additions and 951 deletions
  1. 0 175
      .github/actions/e2e-managed/action.yml
  2. 1 1
      .github/actions/sign/action.yml
  3. 1 1
      .github/workflows/ci.yml
  4. 345 76
      .github/workflows/e2e-managed.yml
  5. 0 1
      .github/workflows/ok-to-test-managed.yml
  6. 23 17
      Makefile
  7. 10 0
      terraform/aws/main.tf
  8. 55 0
      terraform/aws/infrastructure/modules/cluster/irsa.tf
  9. 100 0
      terraform/aws/infrastructure/modules/cluster/main.tf
  10. 44 0
      terraform/aws/infrastructure/modules/cluster/outputs.tf
  11. 31 0
      terraform/aws/infrastructure/modules/cluster/provider.tf
  12. 5 0
      terraform/aws/modules/cluster/variables.tf
  13. 9 0
      terraform/aws/infrastructure/provider.tf
  14. 15 0
      terraform/aws/infrastructure/variables.tf
  15. 14 0
      terraform/aws/kubernetes/main.tf
  16. 37 0
      terraform/aws/kubernetes/provider.tf
  17. 4 4
      terraform/aws/variables.tf
  18. 0 60
      terraform/aws/modules/cluster/auth.tf
  19. 0 80
      terraform/aws/modules/cluster/irsa.tf
  20. 0 145
      terraform/aws/modules/cluster/main.tf
  21. 0 0
      terraform/aws/modules/cluster/outputs.tf
  22. 0 10
      terraform/aws/modules/cluster/provider.tf
  23. 0 0
      terraform/aws/outputs.tf
  24. 0 24
      terraform/aws/provider.tf
  25. 0 10
      terraform/azure/aks/output.tf
  26. 6 0
      terraform/azure/aks/main.tf
  27. 3 0
      terraform/azure/infrastructure/aks/outputs.tf
  28. 0 1
      terraform/azure/aks/variables.tf
  29. 0 0
      terraform/azure/infrastructure/key-vault/main.tf
  30. 0 0
      terraform/azure/infrastructure/key-vault/output.tf
  31. 0 0
      terraform/azure/infrastructure/key-vault/variables.tf
  32. 9 50
      terraform/azure/main.tf
  33. 32 0
      terraform/azure/infrastructure/providers.tf
  34. 6 6
      terraform/azure/service-principal/main.tf
  35. 2 2
      terraform/azure/service-principal/output.tf
  36. 0 0
      terraform/azure/infrastructure/service-principal/variables.tf
  37. 0 5
      terraform/azure/variables.tf
  38. 45 0
      terraform/azure/kubernetes/main.tf
  39. 56 0
      terraform/azure/kubernetes/provider.tf
  40. 5 0
      terraform/azure/kubernetes/variables.tf
  41. 4 5
      terraform/azure/workload-identity/main.tf
  42. 0 35
      terraform/azure/providers.tf
  43. 0 7
      terraform/azure/workload-identity/variables.tf
  44. 0 80
      terraform/gcp/eso_gcp_modules/gke/main.tf
  45. 0 48
      terraform/gcp/eso_gcp_modules/gke/variable.tf
  46. 0 29
      terraform/gcp/eso_gcp_modules/network/main.tf
  47. 0 18
      terraform/gcp/eso_gcp_modules/network/variable.tf
  48. 26 0
      terraform/gcp/infrastructure/main.tf
  49. 39 0
      terraform/gcp/infrastructure/modules/gke/main.tf
  50. 18 0
      terraform/gcp/infrastructure/modules/gke/variable.tf
  51. 14 0
      terraform/gcp/infrastructure/modules/network/main.tf
  52. 7 0
      terraform/gcp/infrastructure/modules/network/output.tf
  53. 6 0
      terraform/gcp/infrastructure/modules/network/variable.tf
  54. 29 0
      terraform/gcp/infrastructure/provider.tf
  55. 12 0
      terraform/gcp/infrastructure/variable.tf
  56. 8 0
      terraform/gcp/kubernetes/main.tf
  57. 42 0
      terraform/gcp/kubernetes/provider.tf
  58. 6 0
      terraform/gcp/kubernetes/variables.tf
  59. 0 29
      terraform/gcp/main.tf
  60. 0 13
      terraform/gcp/provider.tf
  61. 0 3
      terraform/gcp/provider_variables.tf
  62. 0 16
      terraform/gcp/variable.tf

+ 0 - 175
.github/actions/e2e-managed/action.yml

@@ -1,175 +0,0 @@
-name: "e2e"
-description: "runs our e2e test suite"
-
-runs:
-  using: composite
-  steps:
-
-    # create new status check for this specific provider
-    - uses: actions/github-script@v6
-      with:
-        github-token: ${{ env.GITHUB_TOKEN }}
-        script: |
-          const { data: pull } = await github.rest.pulls.get({
-            ...context.repo,
-            pull_number: process.env.GITHUB_PR_NUMBER
-          });
-          const ref = pull.head.sha;
-          const { data: checks } = await github.rest.checks.listForRef({
-            ...context.repo,
-            ref
-          });
-          const job_name = "e2e-managed-" + process.env.CLOUD_PROVIDER
-          const check = checks.check_runs.filter(c => c.name === job_name);
-          if(check && check.length > 0){
-            const { data: result } = await github.rest.checks.update({
-              ...context.repo,
-              check_run_id: check[0].id,
-              status: 'in_progress',
-            });
-            return result;
-          }
-          const { data: result } = await github.rest.checks.create({
-            ...context.repo,
-            name: job_name,
-            head_sha: pull.head.sha,
-            status: 'in_progress',
-          });
-          return result;
-
-    - name: Setup Go
-      uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
-      with:
-        go-version-file: go.mod
-
-    - name: Find the Go Cache
-      id: go
-      shell: bash
-      run: |
-        echo "build-cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
-        echo "mod-cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
-
-    - name: Cache the Go Build Cache
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
-      with:
-        path: ${{ steps.go.outputs.build-cache }}
-        key: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
-        restore-keys: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-
-
-    - name: Cache Go Dependencies
-      uses: actions/cache@v3
-      with:
-        path: ${{ steps.go.outputs.mod-cache }}
-        key: ${{ runner.os }}-pkg-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
-        restore-keys: ${{ runner.os }}-pkg-${{ github.sha }}-
-
-    - uses: hashicorp/setup-terraform@v3
-
-    - name: Setup TFLint
-      uses: terraform-linters/setup-tflint@v2
-      with:
-        tflint_version: v0.28.0  # Must be specified. See: https://github.com/terraform-linters/tflint/releases for latest versions
-
-    - name: Run TFLint
-      shell: bash
-      run: find ${{ github.workspace }} | grep tf$ | xargs -n1 dirname | xargs -IXXX -n1 /bin/sh -c 'set -o errexit; cd XXX; pwd; tflint --loglevel=info .; cd - >/dev/null'
-
-    - name: Configure AWS Credentials
-      if: env.CLOUD_PROVIDER == 'aws'
-      uses: aws-actions/configure-aws-credentials@v1
-      with:
-        role-to-assume: ${{ env.AWS_OIDC_ROLE_ARN }}
-        aws-region: ${{ env.AWS_REGION }}
-
-    - name: Setup TF Gcloud Provider
-      shell: bash
-      if: env.CLOUD_PROVIDER == 'gcp'
-      env:
-        GCP_SM_SA_GKE_JSON: ${{ env.GCP_SM_SA_GKE_JSON }}
-      run: |-
-        mkdir -p terraform/gcp/secrets
-        echo ${GCP_SM_SA_GKE_JSON} > terraform/gcp/secrets/gcloud-service-account-key.json
-
-    - name: 'Az CLI login'
-      uses: azure/login@v1
-      if: env.CLOUD_PROVIDER == 'azure'
-      with:
-        client-id: ${{ env.TFC_AZURE_CLIENT_ID }}
-        tenant-id: ${{ env.TFC_AZURE_TENANT_ID }}
-        subscription-id: ${{ env.TFC_AZURE_SUBSCRIPTION_ID }}
-
-    - name: Show TF
-      shell: bash
-      env:
-        ARM_CLIENT_ID: "${{ env.TFC_AZURE_CLIENT_ID }}"
-        ARM_SUBSCRIPTION_ID: "${{ env.TFC_AZURE_SUBSCRIPTION_ID }}"
-        ARM_TENANT_ID: "${{ env.TFC_AZURE_TENANT_ID }}"
-      run: |-
-        PROVIDER=${{env.CLOUD_PROVIDER}}
-        make tf.show.${PROVIDER}
-
-    - name: Apply TF
-      shell: bash
-      env:
-        ARM_CLIENT_ID: "${{ env.TFC_AZURE_CLIENT_ID }}"
-        ARM_SUBSCRIPTION_ID: "${{ env.TFC_AZURE_SUBSCRIPTION_ID }}"
-        ARM_TENANT_ID: "${{ env.TFC_AZURE_TENANT_ID }}"
-      run: |-
-        PROVIDER=${{env.CLOUD_PROVIDER}}
-        make tf.apply.${PROVIDER}
-
-    - name: Setup gcloud CLI
-      if: env.CLOUD_PROVIDER == 'gcp'
-      uses: google-github-actions/setup-gcloud@v0
-      with:
-        service_account_key: ${{ env.GCP_SM_SA_GKE_JSON }}
-        project_id: ${{ env.GCP_PROJECT_ID }}
-        install_components: 'gke-gcloud-auth-plugin'
-
-    - name: Get the GKE credentials
-      shell: bash
-      if: env.CLOUD_PROVIDER == 'gcp'
-      run: |-
-        gcloud container clusters get-credentials "$GCP_GKE_CLUSTER" --zone "$GCP_GKE_ZONE" --project "$GCP_PROJECT_ID"
-
-    - name: Get the AWS credentials
-      shell: bash
-      if: env.CLOUD_PROVIDER == 'aws'
-      run: |-
-        aws --region $AWS_REGION eks update-kubeconfig --name $AWS_CLUSTER_NAME
-
-    - name: Get AKS credentials
-      if: env.CLOUD_PROVIDER == 'azure'
-      shell: bash
-      run: |-
-        az aks get-credentials --admin --name eso-cluster --resource-group external-secrets-operator
-
-    - name: Login to Docker
-      uses: docker/login-action@v2
-      if: env.GHCR_USERNAME != ''
-      with:
-        registry: ghcr.io
-        username: ${{ github.actor }}
-        password: ${{ github.token }}
-
-    - name: Run managed e2e Tests
-      shell: bash
-      env:
-        GCP_SM_SA_JSON: ${{ env.GCP_SM_SA_JSON }}
-      run: |
-        export PATH=$PATH:$(go env GOPATH)/bin
-        PROVIDER=${{env.CLOUD_PROVIDER}}
-        go install github.com/onsi/ginkgo/v2/ginkgo@v2.1.6
-        make test.e2e.managed GINKGO_LABELS="${PROVIDER} && managed" TEST_SUITES="provider"
-
-    - name: Destroy TF
-      shell: bash
-      if: always()
-      env:
-        ARM_CLIENT_ID: "${{ env.TFC_AZURE_CLIENT_ID }}"
-        ARM_SUBSCRIPTION_ID: "${{ env.TFC_AZURE_SUBSCRIPTION_ID }}"
-        ARM_TENANT_ID: "${{ env.TFC_AZURE_TENANT_ID }}"
-      run: |-
-        PROVIDER=${{env.CLOUD_PROVIDER}}
-        make tf.destroy.${PROVIDER}
-

+ 1 - 1
.github/actions/sign/action.yml

@@ -30,7 +30,7 @@ runs:
       run: cosign version
 
     - name: Login to ghcr.io
-      uses: docker/login-action@v1.14.1
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         registry: ghcr.io
         username: ${{ github.actor }}

+ 1 - 1
.github/workflows/ci.yml

@@ -98,7 +98,7 @@ jobs:
           egress-policy: audit
       - name: Checkout
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-
+      - uses: hashicorp/setup-terraform@c529327889820530c60b4ce5bbc8d6099e166666 # v3
       - name: Setup Go
         uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         id: setup-go

+ 345 - 76
.github/workflows/e2e-managed.yml

@@ -1,3 +1,5 @@
+name: managed e2e tests
+
 on:
   repository_dispatch:
     types: [ok-to-test-managed-command]
@@ -6,26 +8,26 @@ permissions:
   contents: read
 
 env:
-  # Common versions
   DOCKER_BUILDX_VERSION: 'v0.4.2'
-
-  # Common users. We can't run a step 'if secrets.GHCR_USERNAME != ""' but we can run
-  # a step 'if env.GHCR_USERNAME' != ""', so we copy these to succinctly test whether
-  # credentials have been provided before trying to run steps that need them.
   GHCR_USERNAME: ${{ github.actor }}
-  GCP_SM_SA_JSON: ${{ secrets.GCP_SM_SA_JSON}}
-  GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID}}
   USE_GKE_GCLOUD_AUTH_PLUGIN: true
-  TF_VAR_GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID}}
-  GCP_SM_SA_GKE_JSON: ${{ secrets.GCP_SM_SA_GKE_JSON}}
-  GCP_GKE_CLUSTER: test-cluster
-  GCP_GKE_ZONE: ${{ secrets.GCP_GKE_ZONE}}
-  GCP_GSA_NAME: ${{ secrets.GCP_GSA_NAME}} # Google Service Account
-  GCP_KSA_NAME: ${{ secrets.GCP_KSA_NAME}} # Kubernetes Service Account
-  TF_VAR_GCP_GSA_NAME: ${{ secrets.GCP_GSA_NAME}} # Google Service Account for tf
-  TF_VAR_GCP_KSA_NAME: ${{ secrets.GCP_KSA_NAME}} # Kubernetes Service Account for tf
-
-  AWS_OIDC_ROLE_ARN: ${{ secrets.AWS_OIDC_ROLE_ARN}}
+  
+  # GCP variables
+  GCP_SERVICE_ACCOUNT_KEY: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}
+  GCP_SM_SA_GKE_JSON: ${{ secrets.GCP_SM_SA_GKE_JSON }}
+  GCP_GKE_CLUSTER: e2e
+  TF_VAR_GCP_GKE_CLUSTER: e2e
+  GCP_FED_REGION: ${{ secrets.GCP_FED_REGION }}
+  TF_VAR_GCP_FED_REGION: ${{ secrets.GCP_FED_REGION }}
+  GCP_KSA_NAME: ${{ secrets.GCP_KSA_NAME }}
+  TF_VAR_GCP_KSA_NAME: ${{ secrets.GCP_KSA_NAME }}
+  GCP_FED_PROJECT_ID: ${{ secrets.GCP_FED_PROJECT_ID }}
+  TF_VAR_GCP_FED_PROJECT_ID: ${{ secrets.GCP_FED_PROJECT_ID }}
+  GCP_FED_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_FED_SERVICE_ACCOUNT_EMAIL }}
+  GCP_FED_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_FED_WORKLOAD_IDENTITY_PROVIDER }}
+  
+  # AWS variables
+  AWS_OIDC_ROLE_ARN: ${{ secrets.AWS_OIDC_ROLE_ARN }}
   AWS_SA_NAME: ${{ secrets.AWS_SA_NAME }}
   AWS_SA_NAMESPACE: ${{ secrets.AWS_SA_NAMESPACE }}
   AWS_REGION: "eu-central-1"
@@ -34,71 +36,338 @@ env:
   TF_VAR_AWS_SA_NAMESPACE: ${{ secrets.AWS_SA_NAMESPACE }}
   TF_VAR_AWS_REGION: "eu-central-1"
   TF_VAR_AWS_CLUSTER_NAME: "eso-e2e-managed"
-
-  TFC_AZURE_CLIENT_ID: ${{ secrets.TFC_AZURE_CLIENT_ID}}
+  
+  # Azure variables
+  TFC_AZURE_CLIENT_ID: ${{ secrets.TFC_AZURE_CLIENT_ID }}
   TFC_AZURE_CLIENT_SECRET: ${{ secrets.TFC_AZURE_CLIENT_SECRET }}
-  TFC_AZURE_TENANT_ID: ${{ secrets.TFC_AZURE_TENANT_ID}}
+  TFC_AZURE_TENANT_ID: ${{ secrets.TFC_AZURE_TENANT_ID }}
   TFC_AZURE_SUBSCRIPTION_ID: ${{ secrets.TFC_AZURE_SUBSCRIPTION_ID }}
-  TFC_VAULT_URL: ${{ secrets.TFC_VAULT_URL}}
+  TFC_VAULT_URL: ${{ secrets.TFC_VAULT_URL }}
 
-  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-  GITHUB_PR_NUMBER: ${{ github.event.client_payload.pull_request.number }}
-  CLOUD_PROVIDER: ${{ github.event.client_payload.slash_command.args.named.provider }}
+jobs:
 
-name: managed e2e tests
+  setup:
+    runs-on: ubuntu-latest
+    permissions:
+      checks: write
+      contents: read
+    outputs:
+      check_run_id: ${{ steps.create_check.outputs.check_run_id }}
+    steps:
+      - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
+        with:
+          egress-policy: audit
 
-jobs:
-  run-e2e-managed:
+      - name: Create status check
+        id: create_check
+        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const job_name = "e2e-managed-" + "${{ github.event.client_payload.slash_command.args.named.provider }}"
+            const ref = "${{ github.event.client_payload.pull_request.head.sha }}"
+            const { data: checks } = await github.rest.checks.listForRef({
+              ...context.repo,
+              ref
+            });
+            const check = checks.check_runs.filter(c => c.name === job_name);
+            if(check && check.length > 0){
+              const { data: result } = await github.rest.checks.update({
+                ...context.repo,
+                check_run_id: check[0].id,
+                status: 'in_progress',
+              });
+              core.setOutput('check_run_id', check[0].id);
+              return result;
+            }
+            const { data: result } = await github.rest.checks.create({
+              ...context.repo,
+              name: job_name,
+              head_sha: ref,
+              status: 'in_progress',
+            });
+            core.setOutput('check_run_id', result.id);
+            return result;
+
+  # AWS-specific job
+  test-aws:
+    runs-on: ubuntu-latest
+    if: github.event.client_payload.slash_command.args.named.provider == 'aws'
+    needs: [setup]
+    permissions:
+      id-token: write
+      contents: read
+      packages: write
+    steps:
+      - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
+        with:
+          egress-policy: audit
+
+      - name: Fork based /ok-to-test-managed checkout
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        with:
+          ref: 'refs/pull/${{ github.event.client_payload.pull_request.number }}/merge'
+
+      - name: Setup Go
+        uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+        with:
+          go-version-file: go.mod
+
+      - name: Find the Go Cache
+        id: go
+        run: |
+          echo "build-cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
+          echo "mod-cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
+
+      - name: Cache the Go Build Cache
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        with:
+          path: ${{ steps.go.outputs.build-cache }}
+          key: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-
+
+      - name: Cache Go Dependencies
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        with:
+          path: ${{ steps.go.outputs.mod-cache }}
+          key: ${{ runner.os }}-pkg-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-pkg-${{ github.sha }}-
+
+      - uses: hashicorp/setup-terraform@c529327889820530c60b4ce5bbc8d6099e166666 # v3
+
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@06083b756457410befa79ac62f8e6daf35a41869
+        with:
+          role-to-assume: ${{ secrets.AWS_OIDC_ROLE_ARN }}
+          aws-region: ${{ env.AWS_REGION }}
+
+      - name: Apply Terraform
+        run: make tf.apply.aws
+
+      - name: Get AWS EKS credentials
+        run: aws --region $AWS_REGION eks update-kubeconfig --name $AWS_CLUSTER_NAME
+
+      - name: Login to Docker
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        if: env.GHCR_USERNAME != ''
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Run AWS e2e Tests
+        run: |
+          export PATH=$PATH:$(go env GOPATH)/bin
+          make test.e2e.managed GINKGO_LABELS="aws && managed" TEST_SUITES="provider"
+
+      - name: Destroy Terraform
+        if: always()
+        run: make tf.destroy.aws
+
+  # GCP-specific job
+  test-gcp:
     runs-on: ubuntu-latest
+    if: github.event.client_payload.slash_command.args.named.provider == 'gcp'
+    needs: [setup]
     permissions:
-      id-token: write #for oidc auth with aws/gcp/azure
-      checks: write   #publish the commit status
-      contents: read  #for checkout
-    if: github.event_name == 'repository_dispatch'
+      id-token: write
+      contents: read
+      packages: write
+    steps:
+      - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
+        with:
+          egress-policy: audit
+
+      - name: Fork based /ok-to-test-managed checkout
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        with:
+          ref: 'refs/pull/${{ github.event.client_payload.pull_request.number }}/merge'
+
+      - name: Setup Go
+        uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+        with:
+          go-version-file: go.mod
 
+      - name: Find the Go Cache
+        id: go
+        run: |
+          echo "build-cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
+          echo "mod-cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
+
+      - name: Cache the Go Build Cache
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        with:
+          path: ${{ steps.go.outputs.build-cache }}
+          key: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-
+
+      - name: Cache Go Dependencies
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        with:
+          path: ${{ steps.go.outputs.mod-cache }}
+          key: ${{ runner.os }}-pkg-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-pkg-${{ github.sha }}-
+
+      - uses: hashicorp/setup-terraform@c529327889820530c60b4ce5bbc8d6099e166666 # v3
+
+      - name: Authenticate to Google Cloud
+        uses: 'google-github-actions/auth@fc2174804b84f912b1f6d334e9463f484f1c552d' # v3
+        with:
+          project_id: ${{ secrets.GCP_FED_PROJECT_ID }}
+          service_account: ${{ secrets.GCP_FED_SERVICE_ACCOUNT_EMAIL }}
+          workload_identity_provider: ${{ secrets.GCP_FED_WORKLOAD_IDENTITY_PROVIDER }}
+          create_credentials_file: true
+
+      - name: Apply Terraform
+        run: make tf.apply.gcp
+
+      - name: Setup gcloud CLI
+        uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3
+        with:
+          install_components: 'gke-gcloud-auth-plugin'
+
+      - name: Get GKE credentials
+        uses: google-github-actions/get-gke-credentials@3e00d2f47c840b194fc2ccc712879146e87f98cb # v3
+        with:
+          cluster_name: '${{ env.GCP_GKE_CLUSTER }}'
+          location: 'europe-west1'
+          project_id: '${{ secrets.GCP_FED_PROJECT_ID }}'
+
+      - name: Login to Docker
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        if: env.GHCR_USERNAME != ''
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Run GCP e2e Tests
+        env:
+          GCP_SERVICE_ACCOUNT_KEY: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}
+        run: |
+          export PATH=$PATH:$(go env GOPATH)/bin
+          make test.e2e.managed GINKGO_LABELS="gcp && managed" TEST_SUITES="provider"
+
+      - name: Destroy Terraform
+        if: always()
+        run: make tf.destroy.gcp
+
+  # Azure-specific job
+  test-azure:
+    runs-on: ubuntu-latest
+    if: github.event.client_payload.slash_command.args.named.provider == 'azure'
+    needs: [setup]
+    permissions:
+      id-token: write
+      contents: read
+      packages: write
+    steps:
+      - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
+        with:
+          egress-policy: audit
+
+      - name: Fork based /ok-to-test-managed checkout
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        with:
+          ref: 'refs/pull/${{ github.event.client_payload.pull_request.number }}/merge'
+
+      - name: Setup Go
+        uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+        with:
+          go-version-file: go.mod
+
+      - name: Find the Go Cache
+        id: go
+        run: |
+          echo "build-cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
+          echo "mod-cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
+
+      - name: Cache the Go Build Cache
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        with:
+          path: ${{ steps.go.outputs.build-cache }}
+          key: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-build-unit-tests-${{ github.sha }}-
+
+      - name: Cache Go Dependencies
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        with:
+          path: ${{ steps.go.outputs.mod-cache }}
+          key: ${{ runner.os }}-pkg-${{ github.sha }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-pkg-${{ github.sha }}-
+
+      - uses: hashicorp/setup-terraform@c529327889820530c60b4ce5bbc8d6099e166666 # v3
+
+      - name: Azure CLI login
+        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5
+        with:
+          client-id: ${{ secrets.TFC_AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.TFC_AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.TFC_AZURE_SUBSCRIPTION_ID }}
+
+      - name: Apply Terraform
+        env:
+          ARM_CLIENT_ID: "${{ secrets.TFC_AZURE_CLIENT_ID }}"
+          ARM_SUBSCRIPTION_ID: "${{ secrets.TFC_AZURE_SUBSCRIPTION_ID }}"
+          ARM_TENANT_ID: "${{ secrets.TFC_AZURE_TENANT_ID }}"
+        run: make tf.apply.azure
+
+      - name: Get AKS credentials
+        run: az aks get-credentials --admin --name eso-cluster --resource-group external-secrets-e2e
+
+      - name: Login to Docker
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        if: env.GHCR_USERNAME != ''
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Run Azure e2e Tests
+        run: |
+          export PATH=$PATH:$(go env GOPATH)/bin
+          make test.e2e.managed GINKGO_LABELS="azure && managed" TEST_SUITES="provider"
+
+      - name: Destroy Terraform
+        if: always()
+        env:
+          ARM_CLIENT_ID: "${{ secrets.TFC_AZURE_CLIENT_ID }}"
+          ARM_SUBSCRIPTION_ID: "${{ secrets.TFC_AZURE_SUBSCRIPTION_ID }}"
+          ARM_TENANT_ID: "${{ secrets.TFC_AZURE_TENANT_ID }}"
+        run: make tf.destroy.azure
+
+  # Final status update job
+  update-status:
+    runs-on: ubuntu-latest
+    if: always()
+    needs: [setup, test-aws, test-gcp, test-azure]
+    permissions:
+      checks: write
+      contents: read
     steps:
-    - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
-      with:
-        egress-policy: audit
-
-    # Check out merge commit
-    - name: Fork based /ok-to-test-managed checkout
-      uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-      with:
-        ref: 'refs/pull/${{ env.GITHUB_PR_NUMBER }}/merge'
-
-    - name: Fetch History
-      run: git fetch --prune --unshallow
-
-    - uses: ./.github/actions/e2e-managed
-
-    # set status=completed
-    - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
-      if: ${{ always() }}
-      env:
-        number: ${{ env.GITHUB_PR_NUMBER }}
-        provider: ${{ env.CLOUD_PROVIDER }}
-        job: ${{ github.job }}
-        # Conveniently, job.status maps to https://developer.github.com/v3/checks/runs/#update-a-check-run
-        conclusion: ${{ job.status }}
-      with:
-        github-token: ${{ env.GITHUB_TOKEN }}
-        script: |
-          const { data: pull } = await github.rest.pulls.get({
-            ...context.repo,
-            pull_number: process.env.number
-          });
-          const ref = pull.head.sha;
-          const { data: checks } = await github.rest.checks.listForRef({
-            ...context.repo,
-            ref
-          });
-          const job_name = "e2e-managed-" + process.env.provider
-          const check = checks.check_runs.filter(c => c.name === job_name);
-          const { data: result } = await github.rest.checks.update({
-            ...context.repo,
-            check_run_id: check[0].id,
-            status: 'completed',
-            conclusion: process.env.conclusion
-          });
-          return result;
+      - name: Update status check
+        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const conclusion = '${{ needs.test-aws.result }}${{ needs.test-gcp.result }}${{ needs.test-azure.result }}';
+            const checkRunId = '${{ needs.setup.outputs.check_run_id }}';
+            
+            // Determine the overall conclusion
+            let finalConclusion = 'success';
+            if (conclusion.includes('failure')) {
+              finalConclusion = 'failure';
+            } else if (conclusion.includes('cancelled')) {
+              finalConclusion = 'cancelled';
+            } else if (conclusion === '') {
+              finalConclusion = 'skipped';
+            }
+            
+            if (checkRunId) {
+              await github.rest.checks.update({
+                ...context.repo,
+                check_run_id: checkRunId,
+                status: 'completed',
+                conclusion: finalConclusion
+              });
+            }

+ 0 - 1
.github/workflows/ok-to-test-managed.yml

@@ -37,7 +37,6 @@ jobs:
         TOKEN: ${{ steps.generate_token.outputs.token }}
       with:
         token: ${{ env.TOKEN }} # GitHub App installation access token
-        # token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT or OAuth token will also work
         reaction-token: ${{ secrets.GITHUB_TOKEN }}
         issue-type: pull-request
         commands: ok-to-test-managed

+ 23 - 17
Makefile

@@ -72,7 +72,7 @@ FAIL	= (echo ${TIME} ${RED}[FAIL]${CNone} && false)
 # ====================================================================================
 # Conformance
 
-reviewable: generate docs manifests helm.generate helm.schema.update helm.docs lint license.check helm.test.update test.crds.update ## Ensure a PR is ready for review.
+reviewable: generate docs manifests helm.generate helm.schema.update helm.docs lint license.check helm.test.update test.crds.update tf.fmt ## Ensure a PR is ready for review.
 	@go mod tidy
 	@cd e2e/ && go mod tidy
 
@@ -324,26 +324,32 @@ docker.promote: ## Promote the docker image to the registry
 # ====================================================================================
 # Terraform
 
-tf.plan.%: ## Runs terraform plan for a provider
-	@cd $(TF_DIR)/$*; \
-	terraform init; \
-	terraform plan
+define run_terraform
+	@cd $(TF_DIR)/$1/infrastructure && \
+	terraform init && \
+	$2 && \
+	cd ../kubernetes && \
+	terraform init && \
+	$3
+endef
+
+tf.plan.%:
+	$(call run_terraform,$*,terraform plan,terraform plan)
 
-tf.apply.%: ## Runs terraform apply for a provider
-	@cd $(TF_DIR)/$*; \
-	terraform init; \
-	terraform apply -auto-approve
+tf.apply.%:
+	$(call run_terraform,$*,terraform apply -auto-approve,terraform apply -auto-approve)
 
-tf.destroy.%: ## Runs terraform destroy for a provider
-	@cd $(TF_DIR)/$*; \
-	terraform init; \
+tf.destroy.%:
+	@cd $(TF_DIR)/$*/kubernetes && \
+	terraform init && \
+	terraform destroy -auto-approve && \
+	cd ../infrastructure && \
+	terraform init && \
 	terraform destroy -auto-approve
 
-tf.show.%: ## Runs terraform show for a provider and outputs to a file
-	@cd $(TF_DIR)/$*; \
-	terraform init; \
-	terraform plan -out tfplan.binary; \
-	terraform show -json tfplan.binary > plan.json
+tf.fmt:
+	@cd $(TF_DIR) && \
+	terraform fmt -recursive
 
 # ====================================================================================
 # Help

+ 10 - 0
terraform/aws/main.tf

@@ -1,3 +1,12 @@
+locals {
+  tags = {
+    Environment = "development"
+    Owner       = "external-secrets"
+    Repository  = "external-secrets"
+    Purpose     = "managed e2e tests"
+  }
+}
+
 module "cluster" {
   source = "./modules/cluster"
 
@@ -5,4 +14,5 @@ module "cluster" {
   cluster_region    = var.AWS_REGION
   irsa_sa_name      = var.AWS_SA_NAME
   irsa_sa_namespace = var.AWS_SA_NAMESPACE
+  tags              = local.tags
 }

+ 55 - 0
terraform/aws/infrastructure/modules/cluster/irsa.tf

@@ -0,0 +1,55 @@
+data "aws_iam_policy_document" "assume-policy" {
+  statement {
+    actions = ["sts:AssumeRoleWithWebIdentity"]
+    condition {
+      test     = "StringEquals"
+      variable = "${trimprefix(module.eks.cluster_oidc_issuer_url, "https://")}:sub"
+
+      values = [
+        "system:serviceaccount:${local.serviceaccount_namespace}:${local.serviceaccount_name}"
+      ]
+    }
+
+    principals {
+      type        = "Federated"
+      identifiers = [module.eks.oidc_provider_arn]
+    }
+  }
+}
+
+# Create the IAM policy document for SSM Parameter Store access
+data "aws_iam_policy_document" "ssm_parameterstore" {
+  statement {
+    actions = [
+      "ssm:GetParameter*",
+      "ssm:PutParameter",
+      "ssm:DescribeParameters",
+      "ssm:DeleteParameter*",
+      "ssm:AddTagsToResource",
+      "ssm:ListTagsForResource",
+      "ssm:RemoveTagsFromResource",
+      "tag:GetResources"
+    ]
+    effect    = "Allow"
+    resources = ["*"]
+  }
+}
+
+resource "aws_iam_role" "eso-e2e-irsa" {
+  name               = "eso-e2e-irsa"
+  path               = "/"
+  assume_role_policy = data.aws_iam_policy_document.assume-policy.json
+}
+
+# Attach the AWS managed policy for Secrets Manager
+resource "aws_iam_role_policy_attachment" "secrets_manager" {
+  role       = aws_iam_role.eso-e2e-irsa.name
+  policy_arn = "arn:aws:iam::aws:policy/SecretsManagerReadWrite"
+}
+
+# Create and attach the inline policy for SSM Parameter Store
+resource "aws_iam_role_policy" "ssm_parameterstore" {
+  name   = "aws_ssm_parameterstore"
+  role   = aws_iam_role.eso-e2e-irsa.id
+  policy = data.aws_iam_policy_document.ssm_parameterstore.json
+}

+ 100 - 0
terraform/aws/infrastructure/modules/cluster/main.tf

@@ -0,0 +1,100 @@
+
+locals {
+  name            = var.cluster_name
+  cluster_version = "1.33"
+  region          = var.cluster_region
+
+  serviceaccount_name      = var.irsa_sa_name
+  serviceaccount_namespace = var.irsa_sa_namespace
+}
+
+data "aws_caller_identity" "current" {}
+
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = local.name
+  kubernetes_version = local.cluster_version
+
+  compute_config = {
+    enabled    = true
+    node_pools = ["general-purpose"]
+  }
+
+  vpc_id                  = module.vpc.vpc_id
+  subnet_ids              = module.vpc.private_subnets
+  endpoint_private_access = true
+  endpoint_public_access  = true
+  enable_irsa             = true
+
+  addons = {
+    coredns = {
+      most_recent = true
+    }
+    kube-proxy = {
+      most_recent = true
+    }
+    vpc-cni = {
+      most_recent = true
+    }
+    eks-pod-identity-agent = {
+      most_recent = true
+    }
+  }
+
+  access_entries = {
+    tf-admin = {
+      principal_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/admin"
+      policy_associations = {
+        tf-admin = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+          access_scope = {
+            type = "cluster"
+          }
+        }
+      }
+    }
+    github-actions = {
+      principal_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/github-actions-external-secrets"
+      policy_associations = {
+        github-actions = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+          access_scope = {
+            type = "cluster"
+          }
+        }
+      }
+    }
+  }
+}
+
+################################################################################
+# Supporting resources
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = "10.0.0.0/16"
+
+  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
+  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
+  enable_nat_gateway   = true
+  single_nat_gateway   = true
+  enable_dns_hostnames = true
+
+  public_subnet_tags = {
+    "kubernetes.io/cluster/${local.name}" = "shared"
+    "kubernetes.io/role/elb"              = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/cluster/${local.name}" = "shared"
+    "kubernetes.io/role/internal-elb"     = 1
+  }
+}

+ 44 - 0
terraform/aws/infrastructure/modules/cluster/outputs.tf

@@ -0,0 +1,44 @@
+output "cluster_id" {
+  description = "The ID/name of the EKS cluster"
+  value       = module.eks.cluster_id
+}
+
+output "cluster_arn" {
+  description = "The Amazon Resource Name (ARN) of the cluster"
+  value       = module.eks.cluster_arn
+}
+
+output "cluster_endpoint" {
+  description = "Endpoint for your Kubernetes API server"
+  value       = module.eks.cluster_endpoint
+}
+
+output "cluster_security_group_id" {
+  description = "Cluster security group that was created by Amazon EKS for the cluster"
+  value       = module.eks.cluster_security_group_id
+}
+
+output "cluster_iam_role_name" {
+  description = "IAM role name associated with EKS cluster"
+  value       = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+  description = "IAM role ARN associated with EKS cluster"
+  value       = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = module.eks.cluster_certificate_authority_data
+}
+
+output "oidc_provider_arn" {
+  description = "The ARN of the OIDC Provider if enabled"
+  value       = module.eks.oidc_provider_arn
+}
+
+output "cluster_primary_security_group_id" {
+  description = "The primary (EKS-managed) cluster security group, as distinct from cluster_security_group_id above"
+  value       = module.eks.cluster_primary_security_group_id
+}

+ 31 - 0
terraform/aws/infrastructure/modules/cluster/provider.tf

@@ -0,0 +1,31 @@
+terraform {
+  required_version = ">= 0.13"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 6.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.0"
+    }
+  }
+}
+
+provider "aws" {
+  region = local.region
+  default_tags {
+    tags = var.tags
+  }
+}
+
+provider "kubernetes" {
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+  token                  = data.aws_eks_cluster_auth.this.token
+}
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_name
+}

+ 5 - 0
terraform/aws/modules/cluster/variables.tf

@@ -14,3 +14,8 @@ variable "irsa_sa_namespace" {
 variable "cluster_region" {
   type = string
 }
+
+variable "tags" {
+  type    = map(string)
+  default = {}
+}

+ 9 - 0
terraform/aws/infrastructure/provider.tf

@@ -0,0 +1,9 @@
+terraform {
+  required_version = ">= 0.13"
+
+  backend "s3" {
+    bucket = "eso-tfstate-e2e-managed"
+    key    = "aws-tfstate"
+    region = "eu-central-1"
+  }
+}

+ 15 - 0
terraform/aws/infrastructure/variables.tf

@@ -0,0 +1,15 @@
+variable "AWS_SA_NAME" {
+  type = string
+}
+
+variable "AWS_SA_NAMESPACE" {
+  type = string
+}
+
+variable "AWS_REGION" {
+  type = string
+}
+
+variable "AWS_CLUSTER_NAME" {
+  type = string
+}

+ 14 - 0
terraform/aws/kubernetes/main.tf

@@ -0,0 +1,14 @@
+// must match the IAM Role defined in infrastructure/modules/cluster
+data "aws_iam_role" "eso-e2e-irsa" {
+  name = "eso-e2e-irsa"
+}
+
+resource "kubernetes_service_account" "this" {
+  metadata {
+    name      = var.AWS_SA_NAME
+    namespace = var.AWS_SA_NAMESPACE
+    annotations = {
+      "eks.amazonaws.com/role-arn" = data.aws_iam_role.eso-e2e-irsa.arn
+    }
+  }
+}

+ 37 - 0
terraform/aws/kubernetes/provider.tf

@@ -0,0 +1,37 @@
+terraform {
+  required_version = ">= 0.13"
+
+  backend "s3" {
+    bucket = "eso-tfstate-e2e-managed"
+    key    = "aws-tfstate-kubernetes"
+    region = "eu-central-1"
+  }
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 6.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.0"
+    }
+  }
+}
+
+provider "aws" {
+  region = var.AWS_REGION
+}
+
+provider "kubernetes" {
+  host                   = data.aws_eks_cluster.this.endpoint
+  cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
+  token                  = data.aws_eks_cluster_auth.this.token
+}
+
+data "aws_eks_cluster_auth" "this" {
+  name = var.AWS_CLUSTER_NAME
+}
+data "aws_eks_cluster" "this" {
+  name = var.AWS_CLUSTER_NAME
+}

+ 4 - 4
terraform/aws/variables.tf

@@ -1,15 +1,15 @@
 variable "AWS_SA_NAME" {
-  type    = string
+  type = string
 }
 
 variable "AWS_SA_NAMESPACE" {
-  type    = string
+  type = string
 }
 
 variable "AWS_REGION" {
-  type    = string
+  type = string
 }
 
 variable "AWS_CLUSTER_NAME" {
-  type    = string
+  type = string
 }

+ 0 - 60
terraform/aws/modules/cluster/auth.tf

@@ -1,60 +0,0 @@
-
-data "aws_eks_cluster_auth" "this" {
-  name = module.eks.cluster_id
-}
-
-data "aws_caller_identity" "current" {}
-
-locals {
-  kubeconfig = yamlencode({
-    apiVersion      = "v1"
-    kind            = "Config"
-    current-context = "terraform"
-    clusters = [{
-      name = module.eks.cluster_id
-      cluster = {
-        certificate-authority-data = module.eks.cluster_certificate_authority_data
-        server                     = module.eks.cluster_endpoint
-      }
-    }]
-    contexts = [{
-      name = "terraform"
-      context = {
-        cluster = module.eks.cluster_id
-        user    = "terraform"
-      }
-    }]
-    users = [{
-      name = "terraform"
-      user = {
-        token = data.aws_eks_cluster_auth.this.token
-      }
-    }]
-  })
-
-  # we have to allow the root account to access the api
-  aws_auth_configmap_yaml = <<-EOT
-  ${chomp(module.eks.aws_auth_configmap_yaml)}
-      - rolearn: arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/admin
-        username: system:aws:root
-        groups:
-          - system:masters
-  EOT
-}
-
-resource "null_resource" "patch_cm" {
-  triggers = {
-    kubeconfig = base64encode(local.kubeconfig)
-    cmd_patch  = <<-EOT
-      kubectl patch configmap/aws-auth --patch "${local.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
-    EOT
-  }
-
-  provisioner "local-exec" {
-    interpreter = ["/bin/bash", "-c"]
-    environment = {
-      KUBECONFIG = self.triggers.kubeconfig
-    }
-    command = self.triggers.cmd_patch
-  }
-}

+ 0 - 80
terraform/aws/modules/cluster/irsa.tf

@@ -1,80 +0,0 @@
-locals {
-  sa_manifest = <<-EOT
-      apiVersion: v1
-      kind: ServiceAccount
-      metadata:
-        name: ${local.serviceaccount_name}
-        namespace: ${local.serviceaccount_namespace}
-        annotations:
-          eks.amazonaws.com/role-arn: "${aws_iam_role.eso-e2e-irsa.arn}"
-  EOT
-}
-
-data "aws_iam_policy_document" "assume-policy" {
-  statement {
-    actions = ["sts:AssumeRoleWithWebIdentity"]
-    condition {
-      test     = "StringEquals"
-      variable = "${trimprefix(module.eks.cluster_oidc_issuer_url, "https://")}:sub"
-
-      values = [
-        "system:serviceaccount:${local.serviceaccount_namespace}:${local.serviceaccount_name}"
-      ]
-    }
-
-    principals {
-      type        = "Federated"
-      identifiers = [module.eks.oidc_provider_arn]
-    }
-  }
-}
-
-resource "aws_iam_role" "eso-e2e-irsa" {
-  name               = "eso-e2e-irsa"
-  path               = "/"
-  assume_role_policy = data.aws_iam_policy_document.assume-policy.json
-  managed_policy_arns = [
-    "arn:aws:iam::aws:policy/SecretsManagerReadWrite"
-  ]
-
-  inline_policy {
-    name = "aws_ssm_parameterstore"
-
-    policy = jsonencode({
-      Version = "2012-10-17"
-      Statement = [
-        {
-          Action = [
-            "ssm:GetParameter*",
-            "ssm:PutParameter",
-            "ssm:DescribeParameters",
-            "ssm:DeleteParameter*",
-            "ssm:AddTagsToResource",
-            "ssm:ListTagsForResource",
-            "ssm:RemoveTagsFromResource",
-            "tag:GetResources"
-          ]
-          Effect   = "Allow"
-          Resource = "*"
-        },
-      ]
-    })
-  }
-}
-
-resource "null_resource" "apply_sa" {
-  triggers = {
-    kubeconfig = base64encode(local.kubeconfig)
-    cmd_patch  = <<-EOT
-      echo '${local.sa_manifest}' | kubectl --kubeconfig <(echo $KUBECONFIG | base64 --decode) apply -f -
-    EOT
-  }
-
-  provisioner "local-exec" {
-    interpreter = ["/bin/bash", "-c"]
-    environment = {
-      KUBECONFIG = self.triggers.kubeconfig
-    }
-    command = self.triggers.cmd_patch
-  }
-}

+ 0 - 145
terraform/aws/modules/cluster/main.tf

@@ -1,145 +0,0 @@
-provider "aws" {
-  region = local.region
-}
-
-locals {
-  name            = var.cluster_name
-  cluster_version = "1.27"
-  region          = var.cluster_region
-
-  serviceaccount_name      = var.irsa_sa_name
-  serviceaccount_namespace = var.irsa_sa_namespace
-
-  tags = {
-    Example    = local.name
-    GithubRepo = "external-secrets"
-    GithubOrg  = "external-secrets"
-  }
-}
-
-module "eks" {
-  source = "git::https://github.com/terraform-aws-modules/terraform-aws-eks?ref=v18.2.0"
-
-  cluster_name                    = local.name
-  cluster_version                 = local.cluster_version
-  cluster_endpoint_private_access = true
-  cluster_endpoint_public_access  = true
-
-  cluster_addons = {
-    coredns = {
-      resolve_conflicts = "OVERWRITE"
-    }
-    kube-proxy = {}
-    vpc-cni = {
-      resolve_conflicts = "OVERWRITE"
-    }
-
-  }
-
-  vpc_id      = module.vpc.vpc_id
-  subnet_ids  = module.vpc.private_subnets
-  enable_irsa = true
-
-  # EKS Managed Node Group(s)
-  eks_managed_node_group_defaults = {
-    ami_type               = "AL2_x86_64"
-    disk_size              = 50
-    instance_types         = ["t3.large"]
-    vpc_security_group_ids = [aws_security_group.additional.id]
-  }
-
-
-  eks_managed_node_groups = {
-    example = {
-      desired_size = 2
-
-      instance_types = ["t3.large"]
-      tags           = local.tags
-
-    }
-  }
-
-  tags = local.tags
-}
-
-################################################################################
-# Supporting resources
-################################################################################
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 3.14"
-
-  name = local.name
-  cidr = "10.0.0.0/16"
-
-  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
-  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
-  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  enable_flow_log                      = false
-  create_flow_log_cloudwatch_iam_role  = false
-  create_flow_log_cloudwatch_log_group = false
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/elb"              = 1
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/internal-elb"     = 1
-  }
-
-  tags = local.tags
-}
-
-resource "aws_security_group" "additional" {
-  name_prefix = "${local.name}-additional"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-    cidr_blocks = [
-      "10.0.0.0/8",
-      "172.16.0.0/12",
-      "192.168.0.0/16",
-    ]
-  }
-
-  # allow control-plane to access webhook
-  ingress {
-    from_port        = 9443
-    to_port          = 9443
-    protocol         = "tcp"
-    cidr_blocks      = ["0.0.0.0/0"]
-    ipv6_cidr_blocks = ["::/0"]
-  }
-
-  ingress {
-    from_port        = 443
-    to_port          = 443
-    protocol         = "tcp"
-    cidr_blocks      = ["0.0.0.0/0"]
-    ipv6_cidr_blocks = ["::/0"]
-  }
-
-
-  # 443, 53, 123 is already allowed
-  egress {
-    from_port        = 80
-    to_port          = 80
-    protocol         = "tcp"
-    cidr_blocks      = ["0.0.0.0/0"]
-    ipv6_cidr_blocks = ["::/0"]
-  }
-
-
-  tags = local.tags
-}

+ 0 - 0
terraform/aws/modules/cluster/outputs.tf


+ 0 - 10
terraform/aws/modules/cluster/provider.tf

@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13"
-
-  required_providers {
-    aws = {
-      source  = "hashicorp/aws"
-      version = "~> 3.0"
-    }
-  }
-}

+ 0 - 0
terraform/aws/outputs.tf


+ 0 - 24
terraform/aws/provider.tf

@@ -1,24 +0,0 @@
-terraform {
-  required_version = ">= 0.13"
-
-  backend "s3" {
-    bucket = "eso-tfstate-e2e-managed"
-    key    = "aws-tfstate"
-    region = "eu-central-1"
-  }
-
-  required_providers {}
-}
-
-provider "aws" {
-  region = "eu-central-1"
-
-  default_tags {
-    tags = {
-      Environment = "development"
-      Owner       = "external-secrets"
-      Repository  = "external-secrets"
-      Purpose     = "managed e2e tests"
-    }
-  }
-}

+ 0 - 10
terraform/azure/aks/output.tf

@@ -1,10 +0,0 @@
-
-output "cluster_issuer_url" {
-  value = azurerm_kubernetes_cluster.current.oidc_issuer_url
-}
-
-output "kube_config" {
-  value = azurerm_kubernetes_cluster.current.kube_config_raw
-
-  sensitive = true
-}

+ 6 - 0
terraform/azure/aks/main.tf

@@ -12,6 +12,12 @@ resource "azurerm_kubernetes_cluster" "current" {
     name       = var.default_node_pool_name
     node_count = var.default_node_pool_node_count
     vm_size    = var.default_node_pool_vm_size
+
+    upgrade_settings {
+      drain_timeout_in_minutes      = 10
+      max_surge                     = "33%"
+      node_soak_duration_in_minutes = 10
+    }
   }
 
   identity {

+ 3 - 0
terraform/azure/infrastructure/aks/outputs.tf

@@ -0,0 +1,3 @@
+output "cluster_issuer_url" {
+  value = azurerm_kubernetes_cluster.current.oidc_issuer_url
+}

+ 0 - 1
terraform/azure/aks/variables.tf

@@ -39,7 +39,6 @@ variable "default_node_pool_node_count" {
 variable "default_node_pool_vm_size" {
   type        = string
   description = " The SKU which should be used for the Virtual Machines used in this Node Pool"
-
 }
 
 variable "cluster_tags" {

terraform/azure/key-vault/main.tf → terraform/azure/infrastructure/key-vault/main.tf


terraform/azure/key-vault/output.tf → terraform/azure/infrastructure/key-vault/output.tf


terraform/azure/key-vault/variables.tf → terraform/azure/infrastructure/key-vault/variables.tf


+ 9 - 50
terraform/azure/main.tf

@@ -2,15 +2,19 @@ data "azurerm_client_config" "current" {}
 
 data "azurerm_subscription" "primary" {}
 
+locals {
+  resource_group_name = "external-secrets-e2e"
+}
+
 resource "azurerm_resource_group" "current" {
-  name     = var.resource_group_name
+  name     = local.resource_group_name
   location = var.resource_group_location
 }
 
 module "test_sp" {
   source = "./service-principal"
 
-  application_display_name = var.application_display_name
+  application_display_name = "managed-e2e-suite-external-secrets-operator"
   application_owners       = [data.azurerm_client_config.current.object_id]
   issuer                   = module.test_aks.cluster_issuer_url
   subject                  = "system:serviceaccount:${var.sa_namespace}:${var.sa_name}"
@@ -23,7 +27,7 @@ module "test_sp" {
 module "e2e_sp" {
   source = "./service-principal"
 
-  application_display_name = var.application_display_name
+  application_display_name = "managed-e2e-suite-external-secrets-e2e"
   application_owners       = [data.azurerm_client_config.current.object_id]
   issuer                   = module.test_aks.cluster_issuer_url
   subject                  = "system:serviceaccount:default:external-secrets-e2e"
@@ -34,7 +38,7 @@ module "test_key_vault" {
 
   key_vault_display_name  = var.key_vault_display_name
   resource_group_location = var.resource_group_location
-  resource_group_name     = var.resource_group_name
+  resource_group_name     = local.resource_group_name
   tenant_id               = data.azurerm_client_config.current.tenant_id
   client_object_id        = data.azurerm_client_config.current.object_id
   eso_sp_object_id        = module.test_sp.sp_object_id
@@ -45,19 +49,12 @@ module "test_key_vault" {
   ]
 }
 
-module "test_workload_identity" {
-  source = "./workload-identity"
-
-  tenant_id = data.azurerm_client_config.current.tenant_id
-  tags      = var.cluster_tags
-
-}
 
 module "test_aks" {
   source = "./aks"
 
   cluster_name                 = var.cluster_name
-  resource_group_name          = var.resource_group_name
+  resource_group_name          = local.resource_group_name
   resource_group_location      = var.resource_group_location
   default_node_pool_node_count = var.default_node_pool_node_count
   default_node_pool_vm_size    = var.default_node_pool_vm_size
@@ -77,41 +74,3 @@ resource "azurerm_role_assignment" "current" {
     azurerm_resource_group.current
   ]
 }
-
-resource "kubernetes_namespace" "eso" {
-  metadata {
-    name = "external-secrets-operator"
-  }
-}
-
-// the `e2e` pod itself runs with workload identity and
-// does not rely on client credentials.
-resource "kubernetes_service_account" "e2e" {
-  metadata {
-    name      = "external-secrets-e2e"
-    namespace = "default"
-    annotations = {
-      "azure.workload.identity/client-id" = module.e2e_sp.application_id
-      "azure.workload.identity/tenant-id" = data.azurerm_client_config.current.tenant_id
-    }
-    labels = {
-      "azure.workload.identity/use" = "true"
-    }
-  }
-  depends_on = [module.test_aks, kubernetes_namespace.eso]
-}
-
-resource "kubernetes_service_account" "current" {
-  metadata {
-    name      = "external-secrets-operator"
-    namespace = "external-secrets-operator"
-    annotations = {
-      "azure.workload.identity/client-id" = module.test_sp.application_id
-      "azure.workload.identity/tenant-id" = data.azurerm_client_config.current.tenant_id
-    }
-    labels = {
-      "azure.workload.identity/use" = "true"
-    }
-  }
-  depends_on = [module.test_aks, kubernetes_namespace.eso]
-}

+ 32 - 0
terraform/azure/infrastructure/providers.tf

@@ -0,0 +1,32 @@
+terraform {
+  backend "azurerm" {
+    resource_group_name  = "external-secrets-tfstate-rg"
+    storage_account_name = "esoe2emanagedtfstate"
+    container_name       = "tfstate"
+    key                  = "infrastructure/terraform.tfstate"
+  }
+  required_providers {
+    azuread = {
+      source  = "hashicorp/azuread"
+      version = "~> 2.0"
+    }
+    azurerm = {
+      source  = "hashicorp/azurerm"
+      version = "~> 3.0"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = "~> 3.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.0"
+    }
+  }
+}
+
+provider "azurerm" {
+  features {}
+  # set this to true when running in CI (GitHub OIDC auth); keep false for local runs
+  use_oidc = false
+}

+ 6 - 6
terraform/azure/service-principal/main.tf

@@ -4,7 +4,7 @@ resource "azuread_application" "current" {
 }
 
 resource "azuread_service_principal" "current" {
-  application_id               = azuread_application.current.application_id
+  client_id                    = azuread_application.current.client_id
   app_role_assignment_required = false
   owners                       = var.application_owners
   feature_tags {
@@ -18,9 +18,9 @@ resource "azuread_service_principal_password" "current" {
 }
 
 resource "azuread_application_federated_identity_credential" "example" {
-  application_object_id = azuread_application.current.object_id
-  display_name          = var.application_display_name
-  audiences             = var.audiences
-  issuer                = var.issuer
-  subject               = var.subject
+  application_id = "/applications/${azuread_application.current.object_id}"
+  display_name   = var.application_display_name
+  audiences      = var.audiences
+  issuer         = var.issuer
+  subject        = var.subject
 }

+ 2 - 2
terraform/azure/service-principal/output.tf

@@ -1,5 +1,5 @@
-output "application_id" {
-  value = azuread_application.current.application_id
+output "client_id" {
+  value = azuread_application.current.client_id
 }
 output "sp_id" {
   value = azuread_service_principal.current.id

terraform/azure/service-principal/variables.tf → terraform/azure/infrastructure/service-principal/variables.tf


+ 0 - 5
terraform/azure/variables.tf

@@ -15,11 +15,6 @@ variable "resource_group_location" {
   description = "The Azure Region where the Resource Group should exist"
   default     = "westeurope"
 }
-variable "application_display_name" {
-  type        = string
-  description = "Metadata name to use."
-  default     = "external-secrets-operator"
-}
 
 variable "dns_prefix" {
   type        = string

+ 45 - 0
terraform/azure/kubernetes/main.tf

@@ -0,0 +1,45 @@
+resource "kubernetes_namespace" "eso" {
+  metadata {
+    name = "external-secrets-operator"
+  }
+}
+
+data "azurerm_client_config" "current" {}
+
+data "azuread_application" "eso" {
+  display_name = "managed-e2e-suite-external-secrets-operator"
+}
+
+data "azuread_application" "e2e" {
+  display_name = "managed-e2e-suite-external-secrets-e2e"
+}
+
+// the `e2e` pod itself runs with workload identity and
+// does not rely on client credentials.
+resource "kubernetes_service_account" "e2e" {
+  metadata {
+    name      = "external-secrets-e2e"
+    namespace = "default"
+    annotations = {
+      "azure.workload.identity/client-id" = data.azuread_application.e2e.client_id
+    }
+    labels = {
+      "azure.workload.identity/use" = "true"
+    }
+  }
+  depends_on = [kubernetes_namespace.eso]
+}
+
+resource "kubernetes_service_account" "current" {
+  metadata {
+    name      = "external-secrets-operator"
+    namespace = "external-secrets-operator"
+    annotations = {
+      "azure.workload.identity/client-id" = data.azuread_application.eso.client_id
+    }
+    labels = {
+      "azure.workload.identity/use" = "true"
+    }
+  }
+  depends_on = [kubernetes_namespace.eso]
+}

+ 56 - 0
terraform/azure/kubernetes/provider.tf

@@ -0,0 +1,56 @@
+terraform {
+  required_version = ">= 0.13"
+
+  backend "azurerm" {
+    resource_group_name  = "external-secrets-tfstate-rg"
+    storage_account_name = "esoe2emanagedtfstate"
+    container_name       = "tfstate"
+    key                  = "kubernetes/terraform.tfstate"
+  }
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 6.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.0"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = "~> 3.0"
+    }
+  }
+}
+
+
+provider "azurerm" {
+  features {}
+  subscription_id = "9cb8d43c-2ed5-40e7-aec8-76a177c32c15"
+}
+
+
+data "azurerm_kubernetes_cluster" "this" {
+  name                = var.cluster_name
+  resource_group_name = "external-secrets-e2e"
+}
+
+provider "helm" {
+  kubernetes = {
+    host                   = data.azurerm_kubernetes_cluster.this.kube_config[0].host
+    username               = data.azurerm_kubernetes_cluster.this.kube_config[0].username
+    password               = data.azurerm_kubernetes_cluster.this.kube_config[0].password
+    client_certificate     = base64decode(data.azurerm_kubernetes_cluster.this.kube_config[0].client_certificate)
+    client_key             = base64decode(data.azurerm_kubernetes_cluster.this.kube_config[0].client_key)
+    cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.this.kube_config[0].cluster_ca_certificate)
+  }
+}
+provider "kubernetes" {
+  host                   = data.azurerm_kubernetes_cluster.this.kube_config[0].host
+  username               = data.azurerm_kubernetes_cluster.this.kube_config[0].username
+  password               = data.azurerm_kubernetes_cluster.this.kube_config[0].password
+  client_certificate     = base64decode(data.azurerm_kubernetes_cluster.this.kube_config[0].client_certificate)
+  client_key             = base64decode(data.azurerm_kubernetes_cluster.this.kube_config[0].client_key)
+  cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.this.kube_config[0].cluster_ca_certificate)
+}

+ 5 - 0
terraform/azure/kubernetes/variables.tf

@@ -0,0 +1,5 @@
+variable "cluster_name" {
+  type        = string
+  description = "The name of the Managed Kubernetes Cluster to create"
+  default     = "eso-cluster"
+}

+ 4 - 5
terraform/azure/workload-identity/main.tf

@@ -3,8 +3,7 @@ resource "kubernetes_namespace" "azure-workload-identity-system" {
     annotations = {
       name = "azure-workload-identity-system"
     }
-    name   = "azure-workload-identity-system"
-    labels = var.tags
+    name = "azure-workload-identity-system"
   }
 }
 
@@ -16,8 +15,8 @@ resource "helm_release" "azure-workload-identity-system" {
   wait       = true
   depends_on = [kubernetes_namespace.azure-workload-identity-system]
 
-  set {
+  set = [{
     name  = "azureTenantID"
-    value = var.tenant_id
-  }
+    value = data.azurerm_client_config.current.tenant_id
+  }]
 }

+ 0 - 35
terraform/azure/providers.tf

@@ -1,35 +0,0 @@
-terraform {
-  required_providers {
-    azuread = {
-      source = "hashicorp/azuread"
-    }
-  }
-}
-
-provider "azurerm" {
-  features {}
-  # set this to false when running locally
-  use_oidc = true 
-}
-
-data "azurerm_kubernetes_cluster" "default" {
-  depends_on          = [module.test_aks] # refresh cluster state before reading
-  name                = var.cluster_name
-  resource_group_name = var.resource_group_name
-}
-
-provider "helm" {
-  kubernetes {
-    host                   = data.azurerm_kubernetes_cluster.default.kube_config.0.host
-    client_certificate     = base64decode(data.azurerm_kubernetes_cluster.default.kube_config.0.client_certificate)
-    client_key             = base64decode(data.azurerm_kubernetes_cluster.default.kube_config.0.client_key)
-    cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.default.kube_config.0.cluster_ca_certificate)
-  }
-}
-
-provider "kubernetes" {
-  host                   = data.azurerm_kubernetes_cluster.default.kube_config.0.host
-  client_certificate     = base64decode(data.azurerm_kubernetes_cluster.default.kube_config.0.client_certificate)
-  client_key             = base64decode(data.azurerm_kubernetes_cluster.default.kube_config.0.client_key)
-  cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.default.kube_config.0.cluster_ca_certificate)
-}

+ 0 - 7
terraform/azure/workload-identity/variables.tf

@@ -1,7 +0,0 @@
-variable "tags" {
-  type = map(string)
-}
-variable "tenant_id" {
-  type        = string
-  description = "Azure Tenant ID"
-}

+ 0 - 80
terraform/gcp/eso_gcp_modules/gke/main.tf

@@ -1,80 +0,0 @@
-resource "google_service_account" "default" {
-  project    = var.project_id
-  account_id = var.GCP_GSA_NAME
-}
-
-resource "google_project_iam_member" "secretadmin" {
-  project = var.project_id
-  role    = "roles/secretmanager.admin"
-  member  = "serviceAccount:${google_service_account.default.email}"
-}
-
-resource "google_project_iam_member" "service_account_token_creator" {
-  project = var.project_id
-  role    = "roles/iam.serviceAccountTokenCreator"
-  member  = "serviceAccount:${google_service_account.default.email}"
-}
-
-resource "google_service_account_iam_member" "pod_identity" {
-  role               = "roles/iam.workloadIdentityUser"
-  member             = "serviceAccount:${var.project_id}.svc.id.goog[default/${var.GCP_KSA_NAME}]"
-  service_account_id = google_service_account.default.name
-}
-
-resource "google_service_account_iam_member" "pod_identity_e2e" {
-  role               = "roles/iam.workloadIdentityUser"
-  member             = "serviceAccount:${var.project_id}.svc.id.goog[default/external-secrets-e2e]"
-  service_account_id = google_service_account.default.name
-}
-
-resource "google_container_cluster" "primary" {
-  project                  = var.project_id
-  name                     = "${var.env}-cluster"
-  location                 = var.zone
-  remove_default_node_pool = true
-  initial_node_count       = var.initial_node_count
-  network                  = var.network
-  subnetwork               = var.subnetwork
-  deletion_protection      = false
-  ip_allocation_policy {}
-  workload_identity_config {
-    workload_pool = "${var.project_id}.svc.id.goog"
-  }
-  resource_labels = {
-    "example" = "value"
-  }
-}
-
-resource "google_container_node_pool" "nodes" {
-  project    = var.project_id
-  name       = "${google_container_cluster.primary.name}-node-pool"
-  location   = google_container_cluster.primary.location
-  cluster    = google_container_cluster.primary.name
-  node_count = var.node_count
-
-  node_config {
-    preemptible     = var.preemptible
-    machine_type    = "n1-standard-2"
-    service_account = google_service_account.default.email
-    oauth_scopes = [
-      "https://www.googleapis.com/auth/cloud-platform"
-    ]
-  }
-}
-
-provider "kubernetes" {
-  host                   = "https://${google_container_cluster.primary.endpoint}"
-  token                  = data.google_client_config.default.access_token
-  cluster_ca_certificate = base64decode(google_container_cluster.primary.master_auth.0.cluster_ca_certificate)
-}
-
-data "google_client_config" "default" {}
-
-resource "kubernetes_service_account" "test" {
-  metadata {
-    name = var.GCP_KSA_NAME
-    annotations = {
-      "iam.gke.io/gcp-service-account" : "${var.GCP_GSA_NAME}@${var.project_id}.iam.gserviceaccount.com"
-    }
-  }
-}

+ 0 - 48
terraform/gcp/eso_gcp_modules/gke/variable.tf

@@ -1,48 +0,0 @@
-variable "project_id" {
-  default = "my-project-1475718618821"
-}
-variable "env" {
-  default = "dev"
-}
-variable "region" {
-  default = "europe-west1"
-}
-variable "zone" {
-  default = "europe-west1-b"
-}
-variable "zones" {
-  default = ["europe-west1-a", "europe-west1-b", "europe-west1-c"]
-}
-variable "network" {
-  default = "dev-vpc"
-}
-variable "subnetwork" {
-  default = "dev-subnetwork"
-}
-variable "ip_pod_range" {
-  default = "dev-pod-ip-range"
-}
-variable "ip_service_range" {
-  default = "dev-service-ip-range"
-}
-variable "horizontal_pod_autoscaling" {
-  default = false
-}
-variable "node_count" {
-  default = 2
-}
-variable "node_min_count" {
-  default = 2
-}
-variable "node_max_count" {
-  default = 2
-}
-variable "initial_node_count" {
-  default = 2
-}
-variable "preemptible" {
-  default = true
-}
-
-variable "GCP_GSA_NAME" {type = string}
-variable "GCP_KSA_NAME" {type = string}

+ 0 - 29
terraform/gcp/eso_gcp_modules/network/main.tf

@@ -1,29 +0,0 @@
-resource "google_compute_network" "env-vpc" {
-  project                 = var.project_id
-  name                    = "${var.env}-vpc"
-  auto_create_subnetworks = false
-}
-
-resource "google_compute_subnetwork" "env-subnet" {
-  project       = var.project_id
-  name          = "${google_compute_network.env-vpc.name}-subnet"
-  region        = var.region
-  network       = google_compute_network.env-vpc.name
-  ip_cidr_range = "10.10.0.0/24"
-}
-
-output "vpc-name" {
-  value = google_compute_network.env-vpc.name
-}
-output "vpc-id" {
-  value = google_compute_network.env-vpc.id
-}
-output "vpc-object" {
-  value = google_compute_network.env-vpc.self_link
-}
-output "subnet-name" {
-  value = google_compute_subnetwork.env-subnet.name
-}
-output "subnet-ip_cidr_range" {
-  value = google_compute_subnetwork.env-subnet.ip_cidr_range
-}

+ 0 - 18
terraform/gcp/eso_gcp_modules/network/variable.tf

@@ -1,18 +0,0 @@
-variable "env" {
-  default = "dev"
-}
-variable "ip_cidr_range" {
-  default = "10.69.0.0/16"
-}
-variable "ip_pod_range" {
-  default = "10.70.0.0/16"
-}
-variable "ip_service_range" {
-  default = "10.71.0.0/16"
-}
-variable "region" {
-  default = "europe-west1"
-}
-variable "project_id" {
-  type = string
-}

+ 26 - 0
terraform/gcp/infrastructure/main.tf

@@ -0,0 +1,26 @@
+locals {
+  credentials_path = "secrets/gcloud-service-account-key.json" # NOTE(review): appears unused in this file — confirm
+}
+
+module "network" {
+  source     = "./modules/network"
+  region     = var.GCP_FED_REGION
+  project_id = var.GCP_FED_PROJECT_ID
+}
+
+module "cluster" {
+  source       = "./modules/gke"
+  project_id   = var.GCP_FED_PROJECT_ID
+  region       = var.GCP_FED_REGION
+  cluster_name = var.GCP_GKE_CLUSTER
+  network      = module.network.network_name
+  subnetwork   = module.network.subnetwork_name
+
+  workload_identity_users = [
+  # eso provider which is set up by e2e tests to
+  # assert eso functionality.
+    var.GCP_KSA_NAME,
+    # e2e test runner which orchestrates the tests
+    "external-secrets-e2e",
+  ]
+}

+ 39 - 0
terraform/gcp/infrastructure/modules/gke/main.tf

@@ -0,0 +1,39 @@
+resource "google_service_account" "default" {
+  project    = var.project_id
+  account_id = "e2e-managed-secretmanager"
+}
+
+resource "google_project_iam_member" "secretadmin" {
+  project = var.project_id
+  role    = "roles/secretmanager.admin"
+  member  = "serviceAccount:${google_service_account.default.email}"
+}
+
+resource "google_project_iam_member" "service_account_token_creator" {
+  project = var.project_id
+  role    = "roles/iam.serviceAccountTokenCreator"
+  member  = "serviceAccount:${google_service_account.default.email}"
+}
+
+resource "google_service_account_iam_member" "pod_identity" {
+  for_each           = toset(var.workload_identity_users)
+  role               = "roles/iam.workloadIdentityUser"
+  member             = "serviceAccount:${var.project_id}.svc.id.goog[default/${each.value}]"
+  service_account_id = google_service_account.default.name
+}
+
+resource "google_container_cluster" "primary" {
+  project             = var.project_id
+  name                = var.cluster_name
+  initial_node_count  = 1
+  network             = var.network
+  subnetwork          = var.subnetwork
+  location            = var.region
+  deletion_protection = false
+
+  ip_allocation_policy {}
+  workload_identity_config {
+    workload_pool = "${var.project_id}.svc.id.goog"
+  }
+}
+

+ 18 - 0
terraform/gcp/infrastructure/modules/gke/variable.tf

@@ -0,0 +1,18 @@
+variable "project_id" {
+  type = string
+}
+variable "region" {
+  type = string
+}
+variable "network" {
+  type = string
+}
+variable "subnetwork" {
+  type = string
+}
+variable "workload_identity_users" {
+  type = list(string)
+}
+variable "cluster_name" {
+  type = string
+}

+ 14 - 0
terraform/gcp/infrastructure/modules/network/main.tf

@@ -0,0 +1,14 @@
+resource "google_compute_network" "vpc" {
+  project                 = var.project_id
+  name                    = "e2e"
+  auto_create_subnetworks = false
+}
+
+resource "google_compute_subnetwork" "subnet" {
+  project       = var.project_id
+  name          = "${google_compute_network.vpc.name}-subnet"
+  region        = var.region
+  network       = google_compute_network.vpc.name
+  ip_cidr_range = "10.10.0.0/24"
+}
+

+ 7 - 0
terraform/gcp/infrastructure/modules/network/output.tf

@@ -0,0 +1,7 @@
+output "network_name" {
+  value = google_compute_network.vpc.name
+}
+
+output "subnetwork_name" {
+  value = google_compute_subnetwork.subnet.name
+}

+ 6 - 0
terraform/gcp/infrastructure/modules/network/variable.tf

@@ -0,0 +1,6 @@
+variable "region" {
+  type = string
+}
+variable "project_id" {
+  type = string
+}

+ 29 - 0
terraform/gcp/infrastructure/provider.tf

@@ -0,0 +1,29 @@
+terraform {
+  backend "gcs" {
+    bucket = "eso-e2e-tfstate"
+    prefix = "gcp-infrastructure"
+  }
+
+  required_providers {
+    google = {
+      source  = "hashicorp/google"
+      version = "~> 7.5"
+    }
+    google-beta = {
+      source  = "hashicorp/google-beta"
+      version = "~> 7.5"
+    }
+  }
+}
+
+provider "google" {
+  project = "external-secrets-operator"
+  region  = "europe-west1"
+  zone    = "europe-west1-b"
+}
+
+provider "google-beta" {
+  project = "external-secrets-operator"
+  region  = "europe-west1"
+  zone    = "europe-west1-b"
+}

+ 12 - 0
terraform/gcp/infrastructure/variable.tf

@@ -0,0 +1,12 @@
+variable "GCP_FED_PROJECT_ID" {
+  type = string
+}
+variable "GCP_KSA_NAME" {
+  type = string
+}
+variable "GCP_GKE_CLUSTER" {
+  type = string
+}
+variable "GCP_FED_REGION" {
+  type = string
+}

+ 8 - 0
terraform/gcp/kubernetes/main.tf

@@ -0,0 +1,8 @@
+resource "kubernetes_service_account" "test" {
+  metadata {
+    name = var.GCP_KSA_NAME
+    annotations = {
+      "iam.gke.io/gcp-service-account" : "e2e-managed-secretmanager@${var.GCP_FED_PROJECT_ID}.iam.gserviceaccount.com"
+    }
+  }
+}

+ 42 - 0
terraform/gcp/kubernetes/provider.tf

@@ -0,0 +1,42 @@
+terraform {
+  backend "gcs" {
+    bucket = "eso-e2e-tfstate"
+    prefix = "gcp-kubernetes"
+  }
+  required_providers {
+    google = {
+      source  = "hashicorp/google"
+      version = "~> 7.5"
+    }
+    google-beta = {
+      source  = "hashicorp/google-beta"
+      version = "~> 7.5"
+    }
+  }
+}
+
+provider "google" {
+  project = "external-secrets-operator"
+  region  = "europe-west1"
+}
+
+provider "google-beta" {
+  project = "external-secrets-operator"
+  region  = "europe-west1"
+}
+
+
+data "google_client_config" "default" {}
+
+provider "kubernetes" {
+  host                   = "https://${data.google_container_cluster.this.endpoint}"
+  token                  = data.google_client_config.default.access_token
+  cluster_ca_certificate = base64decode(data.google_container_cluster.this.master_auth.0.cluster_ca_certificate)
+}
+
+
+data "google_container_cluster" "this" {
+  project  = var.GCP_FED_PROJECT_ID
+  location = "europe-west1" # must match GCP_FED_REGION passed to ../infrastructure — confirm
+  name     = "e2e" # NOTE(review): ../infrastructure names the cluster var.GCP_GKE_CLUSTER — confirm these agree
+}

+ 6 - 0
terraform/gcp/kubernetes/variables.tf

@@ -0,0 +1,6 @@
+variable "GCP_FED_PROJECT_ID" {
+  type = string
+}
+variable "GCP_KSA_NAME" {
+  type = string
+}

+ 0 - 29
terraform/gcp/main.tf

@@ -1,29 +0,0 @@
-terraform {
-  backend "gcs" {
-    bucket      = "eso-infra-state"
-    prefix      = "eso-infra-state/state"
-    credentials = "secrets/gcloud-service-account-key.json"
-  }
-}
-
-module "test-network" {
-  source        = "./eso_gcp_modules/network"
-  env           = var.env
-  region        = var.region
-  ip_cidr_range = var.ip_cidr_range
-  project_id    = var.GCP_PROJECT_ID
-}
-
-module "test-cluster" {
-  source             = "./eso_gcp_modules/gke"
-  project_id         = var.GCP_PROJECT_ID
-  env                = var.env
-  region             = var.region
-  network            = module.test-network.vpc-object
-  subnetwork         = module.test-network.subnet-name
-  node_count         = var.node_count
-  initial_node_count = var.initial_node_count
-  preemptible        = true
-  GCP_GSA_NAME       = var.GCP_GSA_NAME
-  GCP_KSA_NAME       = var.GCP_KSA_NAME
-}

+ 0 - 13
terraform/gcp/provider.tf

@@ -1,13 +0,0 @@
-provider "google" {
-  project = "external-secrets-operator"
-  region = "europe-west1"
-  zone = "europe-west1-b"
-  credentials = file(var.credentials_path)
-}
-
-provider "google-beta" {
-  project = "external-secrets-operator"
-  region = "europe-west1"
-  zone = "europe-west1-b"
-  credentials = file(var.credentials_path)
-}

+ 0 - 3
terraform/gcp/provider_variables.tf

@@ -1,3 +0,0 @@
-variable "credentials_path" {
-  default = "secrets/gcloud-service-account-key.json"
-}

+ 0 - 16
terraform/gcp/variable.tf

@@ -1,16 +0,0 @@
-variable "env" { default = "test" }
-variable "region" { default = "europe-west1" }
-variable "zone" { default = "europe-west1-b" }
-variable "horizontal_pod_autoscaling" { default = false }
-variable "node_count" { default = 2 }
-variable "node_min_count" { default = 2 }
-variable "node_max_count" { default = 2 }
-variable "initial_node_count" { default = 2 }
-variable "max_scale" { default = "10" }
-variable "ip_cidr_range" { default = "10.69.0.0/16" }
-variable "ip-pod-range" { default = "10.70.0.0/16" }
-variable "ip_service_range" { default = "10.71.0.0/16" }
-variable "preemptible" { default = true }
-variable "GCP_PROJECT_ID" {type = string}
-variable "GCP_GSA_NAME" {type = string}
-variable "GCP_KSA_NAME" {type = string}