diff --git a/.circleci/config.yml b/.circleci/config.yml
index c5c9159e75a47..aec1098be52d8 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -522,6 +522,37 @@ jobs:
- store_test_results:
path: target/reports/
+ itest-cfn-v2-engine-provider:
+ executor: ubuntu-machine-amd64
+ working_directory: /tmp/workspace/repo
+ environment:
+ PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
+ steps:
+ - prepare-acceptance-tests
+ - attach_workspace:
+ at: /tmp/workspace
+ - prepare-testselection
+ - prepare-pytest-tinybird
+ - prepare-account-region-randomization
+ - run:
+ name: Test CloudFormation Engine v2
+ environment:
+ PROVIDER_OVERRIDE_CLOUDFORMATION: "engine-v2"
+ TEST_PATH: "tests/aws/services/cloudformation/v2"
+ COVERAGE_ARGS: "-p"
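+            # coverage's -p flag enables parallel mode, suffixing the data file so results from parallel runs can be combined later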
+ # TODO: use docker-run-tests
+ command: |
+ COVERAGE_FILE="target/coverage/.coverage.cloudformation_v2.${CIRCLE_NODE_INDEX}" \
+ PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}${TESTSELECTION_PYTEST_ARGS}--reruns 3 --junitxml=target/reports/cloudformation_v2.xml -o junit_suite_name='cloudformation_v2'" \
+ make test-coverage
+ - persist_to_workspace:
+ root:
+ /tmp/workspace
+ paths:
+ - repo/target/coverage/
+ - store_test_results:
+ path: target/reports/
+
#########################
## Parity Metrics Jobs ##
#########################
@@ -890,6 +921,10 @@ workflows:
requires:
- preflight
- test-selection
+ - itest-cfn-v2-engine-provider:
+ requires:
+ - preflight
+ - test-selection
- unit-tests:
requires:
- preflight
@@ -951,6 +986,7 @@ workflows:
- itest-cloudwatch-v1-provider
- itest-events-v1-provider
- itest-ddb-v2-provider
+ - itest-cfn-v2-engine-provider
- acceptance-tests-amd64
- acceptance-tests-arm64
- integration-tests-amd64
@@ -965,6 +1001,7 @@ workflows:
- itest-cloudwatch-v1-provider
- itest-events-v1-provider
- itest-ddb-v2-provider
+ - itest-cfn-v2-engine-provider
- acceptance-tests-amd64
- acceptance-tests-arm64
- integration-tests-amd64
diff --git a/.github/actions/build-image/action.yml b/.github/actions/build-image/action.yml
new file mode 100644
index 0000000000000..eeb8832cb4494
--- /dev/null
+++ b/.github/actions/build-image/action.yml
@@ -0,0 +1,63 @@
+name: 'Build Image'
+description: 'Composite action which combines all steps necessary to build the LocalStack Community image.'
+inputs:
+ dockerhubPullUsername:
+ description: 'Username to log in to DockerHub to mitigate rate limiting issues with DockerHub.'
+ required: false
+ dockerhubPullToken:
+ description: 'API token to log in to DockerHub to mitigate rate limiting issues with DockerHub.'
+ required: false
+ disableCaching:
+ description: 'Disable Caching'
+ required: false
+outputs:
+ image-artifact-name:
+ description: "Name of the artifact containing the built docker image"
+ value: ${{ steps.image-artifact-name.outputs.image-artifact-name }}
+runs:
+ using: "composite"
+ # This GH Action requires localstack repo in 'localstack' dir + full git history (fetch-depth: 0)
+ steps:
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: 'localstack/.python-version'
+
+ - name: Install docker helper dependencies
+ shell: bash
+ run: pip install --upgrade setuptools setuptools_scm
+
+ - name: Login to Docker Hub
+ # login to DockerHub to avoid rate limiting issues on custom runners
+ uses: docker/login-action@v3
+    if: ${{ inputs.dockerhubPullUsername != '' && inputs.dockerhubPullToken != '' }}
+ with:
+ username: ${{ inputs.dockerhubPullUsername }}
+ password: ${{ inputs.dockerhubPullToken }}
+
+ - name: Build Docker Image
+ id: build-image
+ shell: bash
+ env:
+ DOCKER_BUILD_FLAGS: "--load ${{ inputs.disableCaching == 'true' && '--no-cache' || '' }}"
+ PLATFORM: ${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}
+ DOCKERFILE: ../Dockerfile
+ DOCKER_BUILD_CONTEXT: ..
+ IMAGE_NAME: "localstack/localstack"
+ working-directory: localstack/localstack-core
+ run: |
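+        # "build" creates the image; "save" exports it to a tar archive and sets the IMAGE_FILENAME step output consumed by the upload step below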
+ ../bin/docker-helper.sh build
+ ../bin/docker-helper.sh save
+
+ - name: Store Docker Image as Artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: localstack-docker-image-${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}
+ # the path is defined by the "save" command of the docker-helper, which sets a GitHub output "IMAGE_FILENAME"
+        path: localstack/localstack-core/${{ steps.build-image.outputs.IMAGE_FILENAME }}
+ retention-days: 1
+
+ - name: Set image artifact name as output
+ id: image-artifact-name
+ shell: bash
+ run: echo "image-artifact-name=localstack-docker-image-${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_OUTPUT
diff --git a/.github/actions/load-localstack-docker-from-artifacts/action.yml b/.github/actions/load-localstack-docker-from-artifacts/action.yml
new file mode 100644
index 0000000000000..97215dedb1042
--- /dev/null
+++ b/.github/actions/load-localstack-docker-from-artifacts/action.yml
@@ -0,0 +1,31 @@
+name: 'Load Localstack Docker image'
+description: 'Composite action that loads a LocalStack Docker image from a tar archive stored in GitHub Workflow Artifacts into the local Docker image cache'
+inputs:
+ platform:
+ required: false
+ description: Target architecture for running the Docker image
+ default: "amd64"
+runs:
+ using: "composite"
+ steps:
+ - name: Download Docker Image
+ uses: actions/download-artifact@v4
+ with:
+ name: localstack-docker-image-${{ inputs.platform }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: '.python-version'
+ cache: 'pip'
+ cache-dependency-path: 'requirements-dev.txt'
+
+ - name: Install docker helper dependencies
+ shell: bash
+ run: pip install --upgrade setuptools setuptools_scm
+
+ - name: Load Docker Image
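+      # "load" imports the image tar downloaded by the previous step into the local Docker image cache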
+ shell: bash
+ env:
+ PLATFORM: ${{ inputs.platform }}
+ run: bin/docker-helper.sh load
diff --git a/.github/actions/setup-tests-env/action.yml b/.github/actions/setup-tests-env/action.yml
new file mode 100644
index 0000000000000..bb8c467628165
--- /dev/null
+++ b/.github/actions/setup-tests-env/action.yml
@@ -0,0 +1,22 @@
+name: 'Setup Test Environment'
+description: 'Composite action which combines all steps necessary to setup the runner for test execution'
+runs:
+ using: "composite"
+ steps:
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: '.python-version'
+ cache: 'pip'
+ cache-dependency-path: 'requirements-dev.txt'
+
+ - name: Install Community Dependencies
+ shell: bash
+ run: make install-dev
+
+ - name: Setup environment
+ shell: bash
+ run: |
+ make install
+ mkdir -p target/reports
+ mkdir -p target/coverage
diff --git a/.github/workflows/aws-main.yml b/.github/workflows/aws-main.yml
new file mode 100644
index 0000000000000..9a8ffecbea409
--- /dev/null
+++ b/.github/workflows/aws-main.yml
@@ -0,0 +1,170 @@
+name: AWS / Build, Test, Push
+
+on:
+ schedule:
+ - cron: 0 2 * * MON-FRI
+ push:
+ paths:
+      - '**'
+      - '!CODEOWNERS'
+      - '!README.md'
+      - '!.gitignore'
+      - '!.git-blame-ignore-revs'
+      - '!.github/**'
+      # re-include the CI config relevant for this workflow (order matters: the last matching pattern wins)
+      - '.github/actions/**'
+      - '.github/workflows/aws-main.yml'
+      - '.github/workflows/aws-tests.yml'
+ branches:
+ - master
+ workflow_dispatch:
+ inputs:
+ onlyAcceptanceTests:
+ description: 'Only run acceptance tests'
+ required: false
+ type: boolean
+ default: false
+ enableTestSelection:
+ description: 'Enable Test Selection'
+ required: false
+ type: boolean
+ default: false
+ disableCaching:
+ description: 'Disable Caching'
+ required: false
+ type: boolean
+ default: false
+ PYTEST_LOGLEVEL:
+ type: choice
+ description: Loglevel for PyTest
+ options:
+ - DEBUG
+ - INFO
+ - WARNING
+ - ERROR
+ - CRITICAL
+ default: WARNING
+
+env:
+ # Docker Image name and default tag used by docker-helper.sh
+ IMAGE_NAME: "localstack/localstack"
+ DEFAULT_TAG: "latest"
+ PLATFORM_NAME_AMD64: "amd64"
+ PLATFORM_NAME_ARM64: "arm64"
+
+jobs:
+ test:
+ name: "Run integration tests"
+ uses: ./.github/workflows/aws-tests.yml
+ with:
+      # onlyAcceptanceTests is true if it was explicitly set to true, or if this is a push event;
+      # otherwise it is false (schedule event, or workflow_dispatch without the flag set)
+ onlyAcceptanceTests: ${{ inputs.onlyAcceptanceTests == true || github.event_name == 'push' }}
+ # default "disableCaching" to `false` if it's a push or schedule event
+ disableCaching: ${{ inputs.disableCaching == true }}
+ # default "disableTestSelection" to `true` if it's a push or schedule event
+ disableTestSelection: ${{ inputs.enableTestSelection != true }}
+ PYTEST_LOGLEVEL: ${{ inputs.PYTEST_LOGLEVEL }}
+ secrets:
+ DOCKERHUB_PULL_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ DOCKERHUB_PULL_TOKEN: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+
+ push:
+ name: "Push Images"
+ runs-on: ubuntu-latest
+    # push the image if we are on master and the dependent jobs were either successful or skipped
+    # TODO: enable this job once the corresponding workflow in CircleCI is disabled
+ if: false
+# if: github.ref == 'refs/heads/master' && !failure() && !cancelled()
+ needs:
+ # all tests need to be successful for the image to be pushed
+ - test
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Load Localstack ${{ env.PLATFORM_NAME_AMD64 }} Docker Image
+ uses: localstack/localstack/.github/actions/load-localstack-docker-from-artifacts@master
+ with:
+ platform: ${{ env.PLATFORM_NAME_AMD64 }}
+
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@v4
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ aws-region: us-east-1
+
+ - name: Login to Amazon ECR
+ id: login-ecr
+ uses: aws-actions/amazon-ecr-login@v2
+ with:
+ registry-type: public
+
+ - name: Push ${{ env.PLATFORM_NAME_AMD64 }} Docker Image
+ env:
+ DOCKER_USERNAME: ${{ secrets.DOCKERHUB_PUSH_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PUSH_TOKEN }}
+ PLATFORM: ${{ env.PLATFORM_NAME_AMD64 }}
+ run: |
+ # Push to Docker Hub
+ ./bin/docker-helper.sh push
+ # Push to Amazon Public ECR
+ TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push
+
+ - name: Load Localstack ${{ env.PLATFORM_NAME_ARM64 }} Docker Image
+ uses: localstack/localstack/.github/actions/load-localstack-docker-from-artifacts@master
+ with:
+ platform: ${{ env.PLATFORM_NAME_ARM64 }}
+
+ - name: Push ${{ env.PLATFORM_NAME_ARM64 }} Docker Image
+ env:
+ DOCKER_USERNAME: ${{ secrets.DOCKERHUB_PUSH_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PUSH_TOKEN }}
+ PLATFORM: ${{ env.PLATFORM_NAME_ARM64 }}
+ run: |
+ # Push to Docker Hub
+ ./bin/docker-helper.sh push
+ # Push to Amazon Public ECR
+ TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push
+
+ - name: Push Multi-Arch Manifest
+ env:
+ DOCKER_USERNAME: ${{ secrets.DOCKERHUB_PUSH_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PUSH_TOKEN }}
+ run: |
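+        # assumption: push-manifests combines the per-architecture tags pushed above into a single multi-arch manifest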
+ # Push to Docker Hub
+ ./bin/docker-helper.sh push-manifests
+ # Push to Amazon Public ECR
+ IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push-manifests
+
+ - name: Publish dev release
+ env:
+ DOCKER_USERNAME: ${{ secrets.DOCKERHUB_PUSH_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PUSH_TOKEN }}
+ run: |
+ if git describe --exact-match --tags >/dev/null 2>&1; then
+ echo "not publishing a dev release as this is a tagged commit"
+ else
+ source .venv/bin/activate
+ make publish || echo "dev release failed (maybe it is already published)"
+ fi
+
+ cleanup:
+ name: "Cleanup"
+ runs-on: ubuntu-latest
+ # only remove the image artifacts if the build was successful
+    # (this allows re-running failed jobs for the duration of the artifact retention period)
+ if: success()
+ needs: push
+ steps:
+ - uses: geekyeggo/delete-artifact@v5
+ with:
+ # delete the docker images shared within the jobs (storage on GitHub is expensive)
+ name: |
+ localstack-docker-image-*
+ lambda-common-*
+ failOnError: false
+ token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/aws-tests-mamr.yml b/.github/workflows/aws-tests-mamr.yml
new file mode 100644
index 0000000000000..769bf355eaa6c
--- /dev/null
+++ b/.github/workflows/aws-tests-mamr.yml
@@ -0,0 +1,63 @@
+name: AWS / MA/MR tests
+
+on:
+ schedule:
+ - cron: 0 1 * * MON-FRI
+ pull_request:
+ paths:
+ - '.github/workflows/aws-tests-mamr.yml'
+ - '.github/workflows/aws-tests.yml'
+ workflow_dispatch:
+ inputs:
+ disableCaching:
+ description: 'Disable Caching'
+ required: false
+ type: boolean
+ default: false
+ PYTEST_LOGLEVEL:
+ type: choice
+ description: Loglevel for PyTest
+ options:
+ - DEBUG
+ - INFO
+ - WARNING
+ - ERROR
+ - CRITICAL
+ default: WARNING
+
+env:
+ IMAGE_NAME: "localstack/localstack"
+ TINYBIRD_DATASOURCE: "community_tests_circleci_ma_mr"
+
+jobs:
+ generate-random-creds:
+ name: "Generate random AWS credentials"
+ runs-on: ubuntu-latest
+ outputs:
+ region: ${{ steps.generate-aws-values.outputs.region }}
+ account_id: ${{ steps.generate-aws-values.outputs.account_id }}
+ steps:
+ - name: Generate values
+ id: generate-aws-values
+ run: |
+ # Generate a random 12-digit number for TEST_AWS_ACCOUNT_ID
+ ACCOUNT_ID=$(shuf -i 100000000000-999999999999 -n 1)
+ echo "account_id=$ACCOUNT_ID" >> $GITHUB_OUTPUT
+ # Set TEST_AWS_REGION_NAME to a random AWS region other than us-east-1
+ REGIONS=("us-east-2" "us-west-1" "us-west-2" "ap-southeast-2" "ap-northeast-1" "eu-central-1" "eu-west-1")
+ REGION=${REGIONS[RANDOM % ${#REGIONS[@]}]}
+ echo "region=$REGION" >> $GITHUB_OUTPUT
+
+ test-ma-mr:
+ name: "Run integration tests"
+ needs: generate-random-creds
+ uses: ./.github/workflows/aws-tests.yml
+ with:
+ disableCaching: ${{ inputs.disableCaching == true }}
+ PYTEST_LOGLEVEL: ${{ inputs.PYTEST_LOGLEVEL }}
+ testAWSRegion: ${{ needs.generate-random-creds.outputs.region }}
+ testAWSAccountId: ${{ needs.generate-random-creds.outputs.account_id }}
+ testAWSAccessKeyId: ${{ needs.generate-random-creds.outputs.account_id }}
+ secrets:
+ DOCKERHUB_PULL_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ DOCKERHUB_PULL_TOKEN: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
diff --git a/.github/workflows/aws-tests.yml b/.github/workflows/aws-tests.yml
new file mode 100644
index 0000000000000..09b881e6328e7
--- /dev/null
+++ b/.github/workflows/aws-tests.yml
@@ -0,0 +1,716 @@
+name: AWS Integration Tests
+
+on:
+ workflow_dispatch:
+ inputs:
+ disableCaching:
+ description: 'Disable Caching'
+ required: false
+ type: boolean
+ default: false
+ PYTEST_LOGLEVEL:
+ type: choice
+ description: Loglevel for PyTest
+ options:
+ - DEBUG
+ - INFO
+ - WARNING
+ - ERROR
+ - CRITICAL
+ default: WARNING
+ disableTestSelection:
+ description: 'Disable Test Selection'
+ required: false
+ type: boolean
+ default: false
+ randomize-aws-credentials:
+ description: "Randomize AWS credentials"
+ default: false
+ required: false
+ type: boolean
+ onlyAcceptanceTests:
+ description: "Run only acceptance tests"
+ default: false
+ required: false
+ type: boolean
+ testAWSRegion:
+ description: 'AWS test region'
+ required: false
+ type: string
+ default: 'us-east-1'
+ testAWSAccountId:
+ description: 'AWS test account ID'
+ required: false
+ type: string
+ default: '000000000000'
+ testAWSAccessKeyId:
+ description: 'AWS test access key ID'
+ required: false
+ type: string
+ default: 'test'
+ workflow_call:
+ inputs:
+ disableCaching:
+ description: 'Disable Caching'
+ required: false
+ type: boolean
+ default: false
+ PYTEST_LOGLEVEL:
+ type: string
+ required: false
+ description: Loglevel for PyTest
+ default: WARNING
+ disableTestSelection:
+ description: 'Disable Test Selection'
+ required: false
+ type: boolean
+ default: false
+ randomize-aws-credentials:
+ description: "Randomize AWS credentials"
+ default: false
+ required: false
+ type: boolean
+ onlyAcceptanceTests:
+ description: "Run only acceptance tests"
+ default: false
+ required: false
+ type: boolean
+ testAWSRegion:
+ description: 'AWS test region'
+ required: false
+ type: string
+ default: 'us-east-1'
+ testAWSAccountId:
+ description: 'AWS test account ID'
+ required: false
+ type: string
+ default: '000000000000'
+ testAWSAccessKeyId:
+ description: 'AWS test access key ID'
+ required: false
+ type: string
+ default: 'test'
+ secrets:
+ DOCKERHUB_PULL_USERNAME:
+ description: 'A DockerHub username - Used to avoid rate limiting issues.'
+ required: true
+ DOCKERHUB_PULL_TOKEN:
+ description: 'A DockerHub token - Used to avoid rate limiting issues.'
+ required: true
+
+env:
+ PYTEST_LOGLEVEL: ${{ inputs.PYTEST_LOGLEVEL || 'WARNING' }}
+ IMAGE_NAME: "localstack/localstack"
+ TINYBIRD_DATASOURCE: "community_tests_integration"
+ TESTSELECTION_PYTEST_ARGS: "${{ !inputs.disableTestSelection && '--path-filter=dist/testselection/test-selection.txt ' || '' }}"
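+  # note: the non-empty value keeps its trailing space so it can be concatenated directly into PYTEST_ARGS without an extra separator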
+ TEST_AWS_REGION_NAME: ${{ inputs.testAWSRegion }}
+ TEST_AWS_ACCOUNT_ID: ${{ inputs.testAWSAccountId }}
+ TEST_AWS_ACCESS_KEY_ID: ${{ inputs.testAWSAccessKeyId }}
+
+jobs:
+ build:
+ name: "Build Docker Image (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }})"
+ needs:
+ - test-preflight
+ strategy:
+ matrix:
+ runner:
+ - ubuntu-latest
+ - ubuntu-24.04-arm
+ exclude:
+        # skip the ARM build unless we are on master or the upgrade-dependencies branch, or forceARMTests is set to true
+ - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}}
+ fail-fast: false
+ runs-on: ${{ matrix.runner }}
+ steps:
+ - name: Determine Runner Architecture
+ shell: bash
+ run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV
+
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ path: localstack
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Build Image
+ uses: localstack/localstack/.github/actions/build-image@master
+ with:
+ disableCaching: ${{ inputs.disableCaching == true && 'true' || 'false' }}
+ dockerhubPullUsername: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ dockerhubPullToken: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+
+ - name: Restore Lambda common runtime packages
+ id: cached-lambda-common-restore
+ if: inputs.disableCaching != true
+ uses: actions/cache/restore@v4
+ with:
+ path: localstack/tests/aws/services/lambda_/functions/common
+ key: common-it-${{ runner.os }}-${{ runner.arch }}-lambda-common-${{ hashFiles('localstack/tests/aws/services/lambda_/functions/common/**/src/*', 'localstack/tests/aws/services/lambda_/functions/common/**/Makefile') }}
+
+ - name: Prebuild lambda common packages
+ run: ./localstack/scripts/build_common_test_functions.sh `pwd`/localstack/tests/aws/services/lambda_/functions/common
+
+ - name: Save Lambda common runtime packages
+ if: inputs.disableCaching != true
+ uses: actions/cache/save@v4
+ with:
+ path: localstack/tests/aws/services/lambda_/functions/common
+ key: ${{ steps.cached-lambda-common-restore.outputs.cache-primary-key }}
+
+ - name: Archive Lambda common packages
+ uses: actions/upload-artifact@v4
+ with:
+ name: lambda-common-${{ env.PLATFORM }}
+ path: |
+ localstack/tests/aws/services/lambda_/functions/common
+ retention-days: 1
+
+
+ test-preflight:
+ name: "Preflight & Unit-Tests"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Prepare Local Test Environment
+ uses: localstack/localstack/.github/actions/setup-tests-env@master
+
+ - name: Linting
+ run: make lint
+
+ - name: Check AWS compatibility markers
+ run: make check-aws-markers
+
+ - name: Determine Test Selection
+ if: ${{ env.TESTSELECTION_PYTEST_ARGS }}
+ run: |
+ source .venv/bin/activate
+ if [ -z "${{ github.event.pull_request.base.sha }}" ]; then
+ echo "Do test selection based on branch name"
+ else
+ echo "Do test selection based on Pull Request event"
+ SCRIPT_OPTS="--base-commit-sha ${{ github.event.pull_request.base.sha }} --head-commit-sha ${{ github.event.pull_request.head.sha }}"
+ fi
+ python -m localstack.testing.testselection.scripts.generate_test_selection $(pwd) dist/testselection/test-selection.txt $SCRIPT_OPTS || (mkdir -p dist/testselection && echo "SENTINEL_ALL_TESTS" >> dist/testselection/test-selection.txt)
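+        # if generation fails, the SENTINEL_ALL_TESTS fallback above presumably makes the --path-filter plugin run the full suite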
+ echo "Test selection:"
+ cat dist/testselection/test-selection.txt
+
+ - name: Archive Test Selection
+ if: ${{ env.TESTSELECTION_PYTEST_ARGS }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-selection
+ path: |
+ dist/testselection/test-selection.txt
+ retention-days: 1
+
+ - name: Run Unit Tests
+ timeout-minutes: 8
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ DEBUG: 1
+ TEST_PATH: "tests/unit"
+ JUNIT_REPORTS_FILE: "pytest-junit-unit.xml"
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} -o junit_suite_name=unit-tests"
+ COVERAGE_FILE: ".coverage.unit"
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}-unit
+ CI_JOB_ID: ${{ github.job }}-unit
+ run: make test-coverage
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-preflight
+ include-hidden-files: true
+ path: |
+ pytest-junit-unit.xml
+ .coverage.unit
+ retention-days: 30
+
+ test-integration:
+ name: "Integration Tests (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }} - ${{ matrix.group }})"
+ if: ${{ !inputs.onlyAcceptanceTests }}
+ needs:
+ - build
+ - test-preflight
+ strategy:
+ matrix:
+ group: [ 1, 2, 3, 4 ]
+ runner:
+ - ubuntu-latest
+ - ubuntu-24.04-arm
+ exclude:
+        # skip the ARM integration tests unless we are on master or the upgrade-dependencies branch, or forceARMTests is set to true
+ - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}}
+ fail-fast: false
+ runs-on: ${{ matrix.runner }}
+ env:
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }}
+ CI_JOB_ID: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }}
+ steps:
+ - name: Determine Runner Architecture
+ shell: bash
+ run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV
+
+ - name: Login to Docker Hub
+ # login to DockerHub to avoid rate limiting issues on custom runners
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+
+ - name: Set environment
+ if: ${{ inputs.testEnvironmentVariables != ''}}
+ shell: bash
+ run: |
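+        # testEnvironmentVariables is expected to be a semicolon-separated list of KEY=VALUE pairs; split them onto separate lines for $GITHUB_ENV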
+ echo "${{ inputs.testEnvironmentVariables }}" | sed "s/;/\n/" >> $GITHUB_ENV
+
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Download Lambda Common packages
+ uses: actions/download-artifact@v4
+ with:
+ name: lambda-common-${{ env.PLATFORM }}
+ path: |
+ tests/aws/services/lambda_/functions/common
+
+ - name: Load Localstack Docker Image
+ uses: localstack/localstack/.github/actions/load-localstack-docker-from-artifacts@master
+ with:
+ platform: "${{ env.PLATFORM }}"
+
+ - name: Download Test Selection
+ if: ${{ env.TESTSELECTION_PYTEST_ARGS }}
+ uses: actions/download-artifact@v4
+ with:
+ name: test-selection
+ path: dist/testselection/
+
+ - name: Run Integration Tests
+ timeout-minutes: 120
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}${{ env.TESTSELECTION_PYTEST_ARGS }} --splits 4 --group ${{ matrix.group }} --store-durations --clean-durations --ignore=tests/unit/ --ignore=tests/bootstrap"
+ COVERAGE_FILE: "target/.coverage.integration-${{ env.PLATFORM }}-${{ matrix.group }}"
+ JUNIT_REPORTS_FILE: "target/pytest-junit-integration-${{ env.PLATFORM }}-${{ matrix.group }}.xml"
+ DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+ # increase Docker SDK timeout to avoid timeouts on BuildJet runners - https://github.com/docker/docker-py/issues/2266
+ DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS: 300
+ run: make docker-run-tests
+
+ - name: Archive Test Durations
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: pytest-split-durations-${{ env.PLATFORM }}-${{ matrix.group }}
+ path: .test_durations
+ include-hidden-files: true
+ retention-days: 5
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-integration-${{ env.PLATFORM }}-${{ matrix.group }}
+ include-hidden-files: true
+ path: |
+ target/pytest-junit-integration-${{ env.PLATFORM }}-${{ matrix.group }}.xml
+ target/.coverage.integration-${{ env.PLATFORM }}-${{ matrix.group }}
+ retention-days: 30
+
+ test-bootstrap:
+ name: Test Bootstrap
+ if: ${{ !inputs.onlyAcceptanceTests }}
+ runs-on: ubuntu-latest
+ needs:
+ - test-preflight
+ - build
+ timeout-minutes: 60
+ env:
+ PLATFORM: 'amd64'
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}
+ CI_JOB_ID: ${{ github.job }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Prepare Local Test Environment
+ uses: localstack/localstack/.github/actions/setup-tests-env@master
+
+ - name: Load Localstack Docker Image
+ uses: localstack/localstack/.github/actions/load-localstack-docker-from-artifacts@master
+ with:
+ platform: "${{ env.PLATFORM }}"
+
+ - name: Run Bootstrap Tests
+ timeout-minutes: 30
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ TEST_PATH: "tests/bootstrap"
+ COVERAGE_FILE: ".coverage.bootstrap"
+ JUNIT_REPORTS_FILE: "pytest-junit-bootstrap.xml"
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} -o junit_suite_name=bootstrap-tests"
+ run: make test-coverage
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-bootstrap
+ include-hidden-files: true
+ path: |
+ pytest-junit-bootstrap.xml
+ .coverage.bootstrap
+ retention-days: 30
+
+ test-acceptance:
+ name: "Acceptance Tests (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }}"
+ needs:
+ - build
+ strategy:
+ matrix:
+ runner:
+ - ubuntu-latest
+ - ubuntu-24.04-arm
+ exclude:
+        # skip the ARM acceptance tests unless we are on master or the upgrade-dependencies branch, or forceARMTests is set to true
+ - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}}
+ fail-fast: false
+ runs-on: ${{ matrix.runner }}
+ env:
+ # Acceptance tests are executed for all test cases, without any test selection
+ TESTSELECTION_PYTEST_ARGS: ""
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }}
+ CI_JOB_ID: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }}
+ steps:
+ - name: Determine Runner Architecture
+ shell: bash
+ run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV
+
+ - name: Login to Docker Hub
+ # login to DockerHub to avoid rate limiting issues on custom runners
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+
+ - name: Set environment
+ if: ${{ inputs.testEnvironmentVariables != ''}}
+ shell: bash
+ run: |
+ echo "${{ inputs.testEnvironmentVariables }}" | sed "s/;/\n/" >> $GITHUB_ENV
+
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Load Localstack Docker Image
+ uses: localstack/localstack/.github/actions/load-localstack-docker-from-artifacts@master
+ with:
+ platform: "${{ env.PLATFORM }}"
+
+ - name: Run Acceptance Tests
+ timeout-minutes: 120
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ DEBUG: 1
+ LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC: 1
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}${{ env.TESTSELECTION_PYTEST_ARGS }} --reruns 3 -m acceptance_test -o junit_suite_name='acceptance_test'"
+ COVERAGE_FILE: "target/.coverage.acceptance-${{ env.PLATFORM }}"
+ JUNIT_REPORTS_FILE: "target/pytest-junit-acceptance-${{ env.PLATFORM }}.xml"
+ TEST_PATH: "tests/aws/"
+ DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+ run: make docker-run-tests
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-acceptance-${{ env.PLATFORM }}
+ include-hidden-files: true
+ path: |
+ target/pytest-junit-acceptance-${{ env.PLATFORM }}.xml
+ target/.coverage.acceptance-${{ env.PLATFORM }}
+ retention-days: 30
+
+ test-cloudwatch-v1:
+ name: Test CloudWatch V1
+ if: ${{ !inputs.onlyAcceptanceTests }}
+ runs-on: ubuntu-latest
+ needs:
+ - test-preflight
+ - build
+ timeout-minutes: 60
+ env:
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}
+ CI_JOB_ID: ${{ github.job }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Prepare Local Test Environment
+ uses: localstack/localstack/.github/actions/setup-tests-env@master
+
+ - name: Run Cloudwatch v1 Provider Tests
+ timeout-minutes: 30
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ DEBUG: 1
+ COVERAGE_FILE: ".coverage.cloudwatch_v1"
+ TEST_PATH: "tests/aws/services/cloudwatch/"
+ JUNIT_REPORTS_FILE: "pytest-junit-cloudwatch-v1.xml"
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name=cloudwatch_v1"
+ PROVIDER_OVERRIDE_CLOUDWATCH: "v1"
+ run: make test-coverage
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-cloudwatch-v1
+ include-hidden-files: true
+ path: |
+ pytest-junit-cloudwatch-v1.xml
+ .coverage.cloudwatch_v1
+ retention-days: 30
+
+ test-ddb-v2:
+ name: Test DynamoDB(Streams) v2
+ if: ${{ !inputs.onlyAcceptanceTests }}
+ runs-on: ubuntu-latest
+ needs:
+ - test-preflight
+ - build
+ timeout-minutes: 60
+ env:
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}
+ CI_JOB_ID: ${{ github.job }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Prepare Local Test Environment
+ uses: localstack/localstack/.github/actions/setup-tests-env@master
+
+ - name: Download Test Selection
+ if: ${{ env.TESTSELECTION_PYTEST_ARGS }}
+ uses: actions/download-artifact@v4
+ with:
+ name: test-selection
+ path: dist/testselection/
+
+ - name: Run DynamoDB(Streams) v2 Provider Tests
+ timeout-minutes: 30
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ COVERAGE_FILE: ".coverage.dynamodb_v2"
+ TEST_PATH: "tests/aws/services/dynamodb/ tests/aws/services/dynamodbstreams/ tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py"
+ JUNIT_REPORTS_FILE: "pytest-junit-dynamodb-v2.xml"
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name=dynamodb_v2"
+ PROVIDER_OVERRIDE_DYNAMODB: "v2"
+ run: make test-coverage
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-dynamodb-v2
+ include-hidden-files: true
+ path: |
+ pytest-junit-dynamodb-v2.xml
+ .coverage.dynamodb_v2
+ retention-days: 30
+
+ test-events-v1:
+ name: Test EventBridge v1
+ if: ${{ !inputs.onlyAcceptanceTests }}
+ runs-on: ubuntu-latest
+ needs:
+ - test-preflight
+ - build
+ timeout-minutes: 60
+ env:
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}
+ CI_JOB_ID: ${{ github.job }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Prepare Local Test Environment
+ uses: localstack/localstack/.github/actions/setup-tests-env@master
+
+ - name: Download Test Selection
+ if: ${{ env.TESTSELECTION_PYTEST_ARGS }}
+ uses: actions/download-artifact@v4
+ with:
+ name: test-selection
+ path: dist/testselection/
+
+ - name: Run EventBridge v1 Provider Tests
+ timeout-minutes: 30
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ DEBUG: 1
+ COVERAGE_FILE: ".coverage.events_v1"
+ TEST_PATH: "tests/aws/services/events/"
+ JUNIT_REPORTS_FILE: "pytest-junit-events-v1.xml"
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name=events_v1"
+ PROVIDER_OVERRIDE_EVENTS: "v1"
+ run: make test-coverage
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-events-v1
+ path: |
+ pytest-junit-events-v1.xml
+ .coverage.events_v1
+ retention-days: 30
+
+ test-cfn-v2-engine:
+    name: Test CloudFormation Engine v2
+ if: ${{ !inputs.onlyAcceptanceTests }}
+ runs-on: ubuntu-latest
+ needs:
+ - test-preflight
+ - build
+ timeout-minutes: 60
+ env:
+ COVERAGE_FILE: ".coverage.cloudformation_v2"
+ JUNIT_REPORTS_FILE: "pytest-junit-cloudformation-v2.xml"
+ # Set job-specific environment variables for pytest-tinybird
+ CI_JOB_NAME: ${{ github.job }}
+ CI_JOB_ID: ${{ github.job }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Prepare Local Test Environment
+ uses: localstack/localstack/.github/actions/setup-tests-env@master
+
+ - name: Download Test Selection
+ if: ${{ env.TESTSELECTION_PYTEST_ARGS }}
+ uses: actions/download-artifact@v4
+ with:
+ name: test-selection
+ path: dist/testselection/
+
+ - name: Run CloudFormation Engine v2 Tests
+ timeout-minutes: 30
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ TEST_PATH: "tests/aws/services/cloudformation/v2"
+ PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name='cloudformation_v2'"
+ PROVIDER_OVERRIDE_CLOUDFORMATION: "engine-v2"
+ run: make test-coverage
+
+ - name: Archive Test Results
+ uses: actions/upload-artifact@v4
+ if: success() || failure()
+ with:
+ name: test-results-cloudformation-v2
+ include-hidden-files: true
+ path: |
+ ${{ env.COVERAGE_FILE }}
+ ${{ env.JUNIT_REPORTS_FILE }}
+ retention-days: 30
+
+ capture-not-implemented:
+ name: "Capture Not Implemented"
+ if: ${{ !inputs.onlyAcceptanceTests && github.ref == 'refs/heads/master' }}
+ runs-on: ubuntu-latest
+ needs: build
+ env:
+ PLATFORM: 'amd64'
+ steps:
+ - name: Login to Docker Hub
+ # login to DockerHub to avoid rate limiting issues on custom runners
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_PULL_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_PULL_TOKEN }}
+
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ # setuptools_scm requires the git history (at least until the last tag) to determine the version
+ fetch-depth: 0
+
+ - name: Load Localstack Docker Image
+ uses: localstack/localstack/.github/actions/load-localstack-docker-from-artifacts@master
+ with:
+ platform: "${{ env.PLATFORM }}"
+
+ - name: Install Community Dependencies
+ run: make install-dev
+
+ - name: Start LocalStack
+ env:
+ # add the GitHub API token to avoid rate limit issues
+ GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ DISABLE_EVENTS: "1"
+ DEBUG: 1
+ IMAGE_NAME: "localstack/localstack:latest"
+ run: |
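+          # start detached, then wait up to 120s for readiness; on timeout, dump the logs and fail the step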
+ source .venv/bin/activate
+ localstack start -d
+ localstack wait -t 120 || (localstack logs && false)
+
+ - name: Run capture-not-implemented
+ run: |
+ source .venv/bin/activate
+ cd scripts
+ mkdir ../results
+ python -m capture_notimplemented_responses ../results/
+
+ - name: Print the logs
+ run: |
+ source .venv/bin/activate
+ localstack logs
+
+ - name: Stop localstack
+ run: |
+ source .venv/bin/activate
+ localstack stop
+
+ - name: Archive Capture-Not-Implemented Results
+ uses: actions/upload-artifact@v4
+ with:
+ name: capture-notimplemented
+ path: results/
+ retention-days: 30
diff --git a/.github/workflows/create_artifact_with_features_files.yml b/.github/workflows/create_artifact_with_features_files.yml
new file mode 100644
index 0000000000000..30e87074a19c0
--- /dev/null
+++ b/.github/workflows/create_artifact_with_features_files.yml
@@ -0,0 +1,14 @@
+name: AWS / Archive feature files
+
+on:
+ schedule:
+ - cron: 0 9 * * TUE
+ workflow_dispatch:
+
+jobs:
+  create-artifact-with-features-files:
+ name: Create artifact with features files
+ uses: localstack/meta/.github/workflows/create-artifact-with-features-files.yml@main
+ with:
+ artifact_name: 'features-files'
+ aws_services_path: 'localstack-core/localstack/services'
diff --git a/.github/workflows/marker-report.yml b/.github/workflows/marker-report.yml
index 75b5352891324..6992be9827954 100644
--- a/.github/workflows/marker-report.yml
+++ b/.github/workflows/marker-report.yml
@@ -60,7 +60,7 @@ jobs:
- name: Collect marker report
if: ${{ !inputs.createIssue }}
env:
- PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -s --co --disable-warnings --marker-report --marker-report-tinybird-upload"
+ PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -p no:localstack.testing.pytest.cloudformation.fixtures -s --co --disable-warnings --marker-report --marker-report-tinybird-upload"
MARKER_REPORT_PROJECT_NAME: localstack
MARKER_REPORT_TINYBIRD_TOKEN: ${{ secrets.MARKER_REPORT_TINYBIRD_TOKEN }}
MARKER_REPORT_COMMIT_SHA: ${{ github.sha }}
@@ -71,7 +71,7 @@ jobs:
# makes use of the marker report plugin localstack.testing.pytest.marker_report
- name: Generate marker report
env:
- PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -s --co --disable-warnings --marker-report --marker-report-path './target'"
+        PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -p no:localstack.testing.pytest.cloudformation.fixtures -s --co --disable-warnings --marker-report --marker-report-path './target'"
MARKER_REPORT_PROJECT_NAME: localstack
MARKER_REPORT_COMMIT_SHA: ${{ github.sha }}
run: |
diff --git a/.github/workflows/pr-validate-features-files.yml b/.github/workflows/pr-validate-features-files.yml
new file mode 100644
index 0000000000000..d62d2b5ffaa77
--- /dev/null
+++ b/.github/workflows/pr-validate-features-files.yml
@@ -0,0 +1,14 @@
+name: Validate AWS features files
+
+on:
+ pull_request:
+ paths:
+ - localstack-core/localstack/services/**
+ branches:
+ - master
+
+jobs:
+ validate-features-files:
+ uses: localstack/meta/.github/workflows/pr-validate-features-files.yml@main
+ with:
+ aws_services_path: 'localstack-core/localstack/services'
diff --git a/.github/workflows/pr-welcome-first-time-contributors.yml b/.github/workflows/pr-welcome-first-time-contributors.yml
index a68fedb4dc899..c01b376ececde 100644
--- a/.github/workflows/pr-welcome-first-time-contributors.yml
+++ b/.github/workflows/pr-welcome-first-time-contributors.yml
@@ -16,8 +16,8 @@ jobs:
with:
github-token: ${{ secrets.PRO_ACCESS_TOKEN }}
script: |
- const issueMessage = `Welcome to LocalStack! Thanks for reporting your first issue and our team will be working towards fixing the issue for you or reach out for more background information. We recommend joining our [Slack Community](https://localstack.cloud/contact/) for real-time help and drop a message to LocalStack Pro Support if you are a Pro user! If you are willing to contribute towards fixing this issue, please have a look at our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md) and our [contributing guide](https://docs.localstack.cloud/contributing/).`;
- const prMessage = `Welcome to LocalStack! Thanks for raising your first Pull Request and landing in your contributions. Our team will reach out with any reviews or feedbacks that we have shortly. We recommend joining our [Slack Community](https://localstack.cloud/contact/) and share your PR on the **#community** channel to share your contributions with us. Please make sure you are following our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md) and our [Code of Conduct](https://github.com/localstack/.github/blob/main/CODE_OF_CONDUCT.md).`;
+      const issueMessage = `Welcome to LocalStack! Thanks for reporting your first issue; our team will work towards fixing it for you, or will reach out for more background information. We recommend joining our [Slack Community](https://localstack.cloud/slack/) for real-time help, and dropping a message to [LocalStack Support](https://docs.localstack.cloud/getting-started/help-and-support/) if you are a licensed user! If you would like to contribute towards fixing this issue, please have a look at our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md).`;
+      const prMessage = `Welcome to LocalStack! Thanks for raising your first Pull Request and contributing to the project. Our team will reach out shortly with any reviews or feedback. We recommend joining our [Slack Community](https://localstack.cloud/slack/) and sharing your PR on the **#community** channel. Please make sure you are following our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md) and our [Code of Conduct](https://github.com/localstack/.github/blob/main/CODE_OF_CONDUCT.md).`;
if (!issueMessage && !prMessage) {
throw new Error('Action should have either issueMessage or prMessage set');
diff --git a/.github/workflows/tests-cli.yml b/.github/workflows/tests-cli.yml
index a1a3051fc7893..9dda7f376e9d1 100644
--- a/.github/workflows/tests-cli.yml
+++ b/.github/workflows/tests-cli.yml
@@ -98,7 +98,7 @@ jobs:
pip install pytest pytest-tinybird
- name: Run CLI tests
env:
- PYTEST_ADDOPTS: "${{ env.TINYBIRD_PYTEST_ARGS }}-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:localstack.testing.pytest.validation_tracking -p no:localstack.testing.pytest.path_filter -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -s"
+ PYTEST_ADDOPTS: "${{ env.TINYBIRD_PYTEST_ARGS }}-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:localstack.testing.pytest.validation_tracking -p no:localstack.testing.pytest.path_filter -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -p no:localstack.testing.pytest.cloudformation.fixtures -s"
TEST_PATH: "tests/cli/"
run: make test
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e9e87ed90b64a..0e14866fcce53 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,13 +3,19 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: v0.11.2
+ rev: v0.11.8
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
# Run the formatter.
- id: ruff-format
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.15.0
+ hooks:
+ - id: mypy
+ entry: bash -c 'cd localstack-core && mypy --install-types --non-interactive'
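+        # assumption: running from localstack-core lets mypy pick up the project configuration there, mirroring the "make lint" target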
+
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
diff --git a/CODEOWNERS b/CODEOWNERS
index 1137baefe445c..e165d6d3cc5d3 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -17,8 +17,8 @@
/Dockerfile @alexrashed
# Git, Pipelines, GitHub config
-/.circleci @alexrashed @dfangl @dominikschubert
-/.github @alexrashed @dfangl @dominikschubert
+/.circleci @alexrashed @dfangl @dominikschubert @silv-io @k-a-il
+/.github @alexrashed @dfangl @dominikschubert @silv-io @k-a-il
/.test_durations @alexrashed
/.git-blame-ignore-revs @alexrashed @thrau
/bin/release-dev.sh @thrau @alexrashed
@@ -185,7 +185,7 @@
/tests/unit/services/opensearch/ @alexrashed @silv-io
# pipes
-/localstack-core/localstack/aws/api/pipes/ @joe4dev @gregfurman
+/localstack-core/localstack/aws/api/pipes/ @tiurin @gregfurman @joe4dev
# route53
/localstack-core/localstack/aws/api/route53/ @giograno
@@ -208,11 +208,6 @@
/localstack-core/localstack/services/s3control/ @bentsku
/tests/aws/services/s3control/ @bentsku
-# scheduler
-/localstack-core/localstack/aws/api/scheduler/ @zaingz @joe4dev
-/localstack-core/localstack/services/scheduler/ @zaingz @joe4dev
-/tests/aws/services/scheduler/ @zaingz @joe4dev
-
# secretsmanager
/localstack-core/localstack/aws/api/secretsmanager/ @dominikschubert @macnev2013 @MEPalma
/localstack-core/localstack/services/secretsmanager/ @dominikschubert @macnev2013 @MEPalma
diff --git a/Dockerfile b/Dockerfile
index 6c5fa4906f3a7..7cfac6990a339 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,7 @@
#
# base: Stage which installs necessary runtime dependencies (OS packages, etc.)
#
-FROM python:3.11.11-slim-bookworm@sha256:7029b00486ac40bed03e36775b864d3f3d39dcbdf19cd45e6a52d541e6c178f0 AS base
+FROM python:3.11.12-slim-bookworm@sha256:75a17dd6f00b277975715fc094c4a1570d512708de6bb4c5dc130814813ebfe4 AS base
ARG TARGETARCH
# Install runtime OS package dependencies
diff --git a/Dockerfile.s3 b/Dockerfile.s3
index c128c8690228e..c53190ee7529d 100644
--- a/Dockerfile.s3
+++ b/Dockerfile.s3
@@ -1,5 +1,5 @@
# base: Stage which installs necessary runtime dependencies (OS packages, filesystem...)
-FROM python:3.11.11-slim-bookworm@sha256:7029b00486ac40bed03e36775b864d3f3d39dcbdf19cd45e6a52d541e6c178f0 AS base
+FROM python:3.11.12-slim-bookworm@sha256:75a17dd6f00b277975715fc094c4a1570d512708de6bb4c5dc130814813ebfe4 AS base
ARG TARGETARCH
# set workdir
diff --git a/MANIFEST.in b/MANIFEST.in
index 2afd2693472a3..07442c11a993f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,10 @@
+exclude .github/**
+exclude .circleci/**
+exclude docs/**
exclude tests/**
exclude .test_durations
+exclude .gitignore
+exclude .pre-commit-config.yaml
+exclude .python-version
include Makefile
include LICENSE.txt
diff --git a/Makefile b/Makefile
index f594d468cc3b6..b2a749b6599c9 100644
--- a/Makefile
+++ b/Makefile
@@ -91,7 +91,7 @@ start: ## Manually start the local infrastructure for testing
($(VENV_RUN); exec bin/localstack start --host)
docker-run-tests: ## Initializes the test environment and runs the tests in a docker container
- docker run -e LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 --entrypoint= -v `pwd`/.git:/opt/code/localstack/.git -v `pwd`/requirements-test.txt:/opt/code/localstack/requirements-test.txt -v `pwd`/tests/:/opt/code/localstack/tests/ -v `pwd`/target/:/opt/code/localstack/target/ -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/localstack:/var/lib/localstack \
+ docker run -e LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 --entrypoint= -v `pwd`/.git:/opt/code/localstack/.git -v `pwd`/requirements-test.txt:/opt/code/localstack/requirements-test.txt -v `pwd`/tests/:/opt/code/localstack/tests/ -v `pwd`/dist/:/opt/code/localstack/dist/ -v `pwd`/target/:/opt/code/localstack/target/ -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/localstack:/var/lib/localstack \
$(IMAGE_NAME):$(DEFAULT_TAG) \
bash -c "make install-test && DEBUG=$(DEBUG) PYTEST_LOGLEVEL=$(PYTEST_LOGLEVEL) PYTEST_ARGS='$(PYTEST_ARGS)' COVERAGE_FILE='$(COVERAGE_FILE)' TEST_PATH='$(TEST_PATH)' LAMBDA_IGNORE_ARCHITECTURE=1 LAMBDA_INIT_POST_INVOKE_WAIT_MS=50 TINYBIRD_PYTEST_ARGS='$(TINYBIRD_PYTEST_ARGS)' TINYBIRD_DATASOURCE='$(TINYBIRD_DATASOURCE)' TINYBIRD_TOKEN='$(TINYBIRD_TOKEN)' TINYBIRD_URL='$(TINYBIRD_URL)' CI_COMMIT_BRANCH='$(CI_COMMIT_BRANCH)' CI_COMMIT_SHA='$(CI_COMMIT_SHA)' CI_JOB_URL='$(CI_JOB_URL)' CI_JOB_NAME='$(CI_JOB_NAME)' CI_JOB_ID='$(CI_JOB_ID)' CI='$(CI)' TEST_AWS_REGION_NAME='${TEST_AWS_REGION_NAME}' TEST_AWS_ACCESS_KEY_ID='${TEST_AWS_ACCESS_KEY_ID}' TEST_AWS_ACCOUNT_ID='${TEST_AWS_ACCOUNT_ID}' make test-coverage"
@@ -110,7 +110,7 @@ docker-cp-coverage:
docker rm -v $$id
test: ## Run automated tests
- ($(VENV_RUN); $(TEST_EXEC) pytest --durations=10 --log-cli-level=$(PYTEST_LOGLEVEL) $(PYTEST_ARGS) $(TEST_PATH))
+ ($(VENV_RUN); $(TEST_EXEC) pytest --durations=10 --log-cli-level=$(PYTEST_LOGLEVEL) --junitxml=$(JUNIT_REPORTS_FILE) $(PYTEST_ARGS) $(TEST_PATH))
test-coverage: LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC = 1
test-coverage: TEST_EXEC = python -m coverage run $(COVERAGE_ARGS) -m
@@ -121,6 +121,7 @@ lint: ## Run code linter to check code style, check if formatte
($(VENV_RUN); python -m ruff check --output-format=full . && python -m ruff format --check --diff .)
$(VENV_RUN); pre-commit run check-pinned-deps-for-needed-upgrade --files pyproject.toml # run pre-commit hook manually here to ensure that this check runs in CI as well
$(VENV_RUN); openapi-spec-validator localstack-core/localstack/openapi.yaml
+ $(VENV_RUN); cd localstack-core && mypy --install-types --non-interactive
lint-modified: ## Run code linter to check code style, check if formatter would make changes on modified files, and check if dependency pins need to be updated because of modified files
($(VENV_RUN); python -m ruff check --output-format=full `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs` && python -m ruff format --check `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs`)
diff --git a/README.md b/README.md
index 856d337effd5e..23c071c33d9d7 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-:zap: We are thrilled to announce the release of LocalStack 4.1 :zap:
+:zap: We are thrilled to announce the release of LocalStack 4.3 :zap:
@@ -93,7 +93,7 @@ Start LocalStack inside a Docker container by running:
/ /___/ /_/ / /__/ /_/ / /___/ / /_/ /_/ / /__/ ,<
/_____/\____/\___/\__,_/_//____/\__/\__,_/\___/_/|_|
-- LocalStack CLI: 4.1.0
+- LocalStack CLI: 4.3.0
- Profile: default
- App: https://app.localstack.cloud
diff --git a/localstack-core/localstack/aws/api/acm/__init__.py b/localstack-core/localstack/aws/api/acm/__init__.py
index f3e00c58471e6..dc9585748f9f7 100644
--- a/localstack-core/localstack/aws/api/acm/__init__.py
+++ b/localstack-core/localstack/aws/api/acm/__init__.py
@@ -23,6 +23,10 @@
ValidationExceptionMessage = str
+class CertificateManagedBy(StrEnum):
+ CLOUDFRONT = "CLOUDFRONT"
+
+
class CertificateStatus(StrEnum):
PENDING_VALIDATION = "PENDING_VALIDATION"
ISSUED = "ISSUED"
@@ -131,6 +135,7 @@ class RevocationReason(StrEnum):
CA_COMPROMISE = "CA_COMPROMISE"
AFFILIATION_CHANGED = "AFFILIATION_CHANGED"
SUPERCEDED = "SUPERCEDED"
+ SUPERSEDED = "SUPERSEDED"
CESSATION_OF_OPERATION = "CESSATION_OF_OPERATION"
CERTIFICATE_HOLD = "CERTIFICATE_HOLD"
REMOVE_FROM_CRL = "REMOVE_FROM_CRL"
@@ -150,6 +155,7 @@ class SortOrder(StrEnum):
class ValidationMethod(StrEnum):
EMAIL = "EMAIL"
DNS = "DNS"
+ HTTP = "HTTP"
class AccessDeniedException(ServiceException):
@@ -285,6 +291,11 @@ class KeyUsage(TypedDict, total=False):
TStamp = datetime
+class HttpRedirect(TypedDict, total=False):
+ RedirectFrom: Optional[String]
+ RedirectTo: Optional[String]
+
+
class ResourceRecord(TypedDict, total=False):
Name: String
Type: RecordType
@@ -300,6 +311,7 @@ class DomainValidation(TypedDict, total=False):
ValidationDomain: Optional[DomainNameString]
ValidationStatus: Optional[DomainStatus]
ResourceRecord: Optional[ResourceRecord]
+ HttpRedirect: Optional[HttpRedirect]
ValidationMethod: Optional[ValidationMethod]
@@ -321,6 +333,7 @@ class CertificateDetail(TypedDict, total=False):
CertificateArn: Optional[Arn]
DomainName: Optional[DomainNameString]
SubjectAlternativeNames: Optional[DomainList]
+ ManagedBy: Optional[CertificateManagedBy]
DomainValidationOptions: Optional[DomainValidationList]
Serial: Optional[String]
Subject: Optional[String]
@@ -370,6 +383,7 @@ class CertificateSummary(TypedDict, total=False):
IssuedAt: Optional[TStamp]
ImportedAt: Optional[TStamp]
RevokedAt: Optional[TStamp]
+ ManagedBy: Optional[CertificateManagedBy]
CertificateSummaryList = List[CertificateSummary]
@@ -422,6 +436,7 @@ class Filters(TypedDict, total=False):
extendedKeyUsage: Optional[ExtendedKeyUsageFilterList]
keyUsage: Optional[KeyUsageFilterList]
keyTypes: Optional[KeyAlgorithmList]
+ managedBy: Optional[CertificateManagedBy]
class GetAccountConfigurationResponse(TypedDict, total=False):
@@ -498,6 +513,7 @@ class RequestCertificateRequest(ServiceRequest):
CertificateAuthorityArn: Optional[PcaArn]
Tags: Optional[TagList]
KeyAlgorithm: Optional[KeyAlgorithm]
+ ManagedBy: Optional[CertificateManagedBy]
class RequestCertificateResponse(TypedDict, total=False):
@@ -619,6 +635,7 @@ def request_certificate(
certificate_authority_arn: PcaArn = None,
tags: TagList = None,
key_algorithm: KeyAlgorithm = None,
+ managed_by: CertificateManagedBy = None,
**kwargs,
) -> RequestCertificateResponse:
raise NotImplementedError
diff --git a/localstack-core/localstack/aws/api/apigateway/__init__.py b/localstack-core/localstack/aws/api/apigateway/__init__.py
index 47bd84435db2f..f8a46b7b5e4c6 100644
--- a/localstack-core/localstack/aws/api/apigateway/__init__.py
+++ b/localstack-core/localstack/aws/api/apigateway/__init__.py
@@ -124,6 +124,11 @@ class IntegrationType(StrEnum):
AWS_PROXY = "AWS_PROXY"
+class IpAddressType(StrEnum):
+ ipv4 = "ipv4"
+ dualstack = "dualstack"
+
+
class LocationStatusType(StrEnum):
DOCUMENTED = "DOCUMENTED"
UNDOCUMENTED = "UNDOCUMENTED"
@@ -449,6 +454,7 @@ class MutualTlsAuthenticationInput(TypedDict, total=False):
class EndpointConfiguration(TypedDict, total=False):
types: Optional[ListOfEndpointType]
+ ipAddressType: Optional[IpAddressType]
vpcEndpointIds: Optional[ListOfString]
diff --git a/localstack-core/localstack/aws/api/cloudformation/__init__.py b/localstack-core/localstack/aws/api/cloudformation/__init__.py
index 32951575e960c..fcd83677aac19 100644
--- a/localstack-core/localstack/aws/api/cloudformation/__init__.py
+++ b/localstack-core/localstack/aws/api/cloudformation/__init__.py
@@ -120,6 +120,7 @@
ResourceStatusReason = str
ResourceToSkip = str
ResourceType = str
+ResourceTypeFilter = str
ResourceTypePrefix = str
ResourcesFailed = int
ResourcesPending = int
@@ -526,6 +527,11 @@ class ResourceStatus(StrEnum):
ROLLBACK_FAILED = "ROLLBACK_FAILED"
+class ScanType(StrEnum):
+ FULL = "FULL"
+ PARTIAL = "PARTIAL"
+
+
class StackDriftDetectionStatus(StrEnum):
DETECTION_IN_PROGRESS = "DETECTION_IN_PROGRESS"
DETECTION_FAILED = "DETECTION_FAILED"
@@ -1536,6 +1542,16 @@ class DescribeResourceScanInput(ServiceRequest):
ResourceScanId: ResourceScanId
+ResourceTypeFilters = List[ResourceTypeFilter]
+
+
+class ScanFilter(TypedDict, total=False):
+ Types: Optional[ResourceTypeFilters]
+
+
+ScanFilters = List[ScanFilter]
+
+
class DescribeResourceScanOutput(TypedDict, total=False):
ResourceScanId: Optional[ResourceScanId]
Status: Optional[ResourceScanStatus]
@@ -1546,6 +1562,7 @@ class DescribeResourceScanOutput(TypedDict, total=False):
ResourceTypes: Optional[ResourceTypes]
ResourcesScanned: Optional[ResourcesScanned]
ResourcesRead: Optional[ResourcesRead]
+ ScanFilters: Optional[ScanFilters]
class DescribeStackDriftDetectionStatusInput(ServiceRequest):
@@ -2246,6 +2263,7 @@ class ListResourceScanResourcesOutput(TypedDict, total=False):
class ListResourceScansInput(ServiceRequest):
NextToken: Optional[NextToken]
MaxResults: Optional[ResourceScannerMaxResults]
+ ScanTypeFilter: Optional[ScanType]
class ResourceScanSummary(TypedDict, total=False):
@@ -2255,6 +2273,7 @@ class ResourceScanSummary(TypedDict, total=False):
StartTime: Optional[Timestamp]
EndTime: Optional[Timestamp]
PercentageCompleted: Optional[PercentageCompleted]
+ ScanType: Optional[ScanType]
ResourceScanSummaries = List[ResourceScanSummary]
@@ -2745,6 +2764,7 @@ class SignalResourceInput(ServiceRequest):
class StartResourceScanInput(ServiceRequest):
ClientRequestToken: Optional[ClientRequestToken]
+ ScanFilters: Optional[ScanFilters]
class StartResourceScanOutput(TypedDict, total=False):
@@ -3482,6 +3502,7 @@ def list_resource_scans(
context: RequestContext,
next_token: NextToken = None,
max_results: ResourceScannerMaxResults = None,
+ scan_type_filter: ScanType = None,
**kwargs,
) -> ListResourceScansOutput:
raise NotImplementedError
@@ -3709,7 +3730,11 @@ def signal_resource(
@handler("StartResourceScan")
def start_resource_scan(
- self, context: RequestContext, client_request_token: ClientRequestToken = None, **kwargs
+ self,
+ context: RequestContext,
+ client_request_token: ClientRequestToken = None,
+ scan_filters: ScanFilters = None,
+ **kwargs,
) -> StartResourceScanOutput:
raise NotImplementedError
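
Taken together, the CloudFormation changes model filtered resource scans: `StartResourceScanInput` accepts `ScanFilters`, `DescribeResourceScanOutput` echoes them back, and filtered scans can be told apart via the new `ScanType`. A hedged boto3 sketch (assuming a botocore that already knows these members):

```python
import boto3

cfn = boto3.client("cloudformation", endpoint_url="http://localhost:4566")

# Each ScanFilter lists resource type names under "Types" (see ScanFilter above).
scan_id = cfn.start_resource_scan(
    ScanFilters=[{"Types": ["AWS::S3::Bucket", "AWS::Lambda::Function"]}]
)["ResourceScanId"]

detail = cfn.describe_resource_scan(ResourceScanId=scan_id)
print(detail["Status"], detail.get("ScanFilters"))

# Filtered scans are expected to surface as ScanType "PARTIAL" in the listing.
for summary in cfn.list_resource_scans(ScanTypeFilter="PARTIAL")["ResourceScanSummaries"]:
    print(summary["ResourceScanId"], summary.get("ScanType"))
```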
diff --git a/localstack-core/localstack/aws/api/ec2/__init__.py b/localstack-core/localstack/aws/api/ec2/__init__.py
index 41c82c1020408..2f92ebd70813b 100644
--- a/localstack-core/localstack/aws/api/ec2/__init__.py
+++ b/localstack-core/localstack/aws/api/ec2/__init__.py
@@ -299,6 +299,10 @@
RetentionPeriodResponseDays = int
RoleId = str
RouteGatewayId = str
+RouteServerEndpointId = str
+RouteServerId = str
+RouteServerMaxResults = int
+RouteServerPeerId = str
RouteTableAssociationId = str
RouteTableId = str
RunInstancesUserData = str
@@ -2312,6 +2316,11 @@ class IpamManagementState(StrEnum):
ignored = "ignored"
+class IpamMeteredAccount(StrEnum):
+ ipam_owner = "ipam-owner"
+ resource_owner = "resource-owner"
+
+
class IpamNetworkInterfaceAttachmentStatus(StrEnum):
available = "available"
in_use = "in-use"
@@ -3016,6 +3025,9 @@ class ResourceType(StrEnum):
verified_access_trust_provider = "verified-access-trust-provider"
vpn_connection_device_type = "vpn-connection-device-type"
vpc_block_public_access_exclusion = "vpc-block-public-access-exclusion"
+ route_server = "route-server"
+ route_server_endpoint = "route-server-endpoint"
+ route_server_peer = "route-server-peer"
ipam_resource_discovery = "ipam-resource-discovery"
ipam_resource_discovery_association = "ipam-resource-discovery-association"
instance_connect_endpoint = "instance-connect-endpoint"
@@ -3034,6 +3046,85 @@ class RouteOrigin(StrEnum):
EnableVgwRoutePropagation = "EnableVgwRoutePropagation"
+class RouteServerAssociationState(StrEnum):
+ associating = "associating"
+ associated = "associated"
+ disassociating = "disassociating"
+
+
+class RouteServerBfdState(StrEnum):
+ up = "up"
+ down = "down"
+
+
+class RouteServerBgpState(StrEnum):
+ up = "up"
+ down = "down"
+
+
+class RouteServerEndpointState(StrEnum):
+ pending = "pending"
+ available = "available"
+ deleting = "deleting"
+ deleted = "deleted"
+ failing = "failing"
+ failed = "failed"
+ delete_failed = "delete-failed"
+
+
+class RouteServerPeerLivenessMode(StrEnum):
+ bfd = "bfd"
+ bgp_keepalive = "bgp-keepalive"
+
+
+class RouteServerPeerState(StrEnum):
+ pending = "pending"
+ available = "available"
+ deleting = "deleting"
+ deleted = "deleted"
+ failing = "failing"
+ failed = "failed"
+
+
+class RouteServerPersistRoutesAction(StrEnum):
+ enable = "enable"
+ disable = "disable"
+ reset = "reset"
+
+
+class RouteServerPersistRoutesState(StrEnum):
+ enabling = "enabling"
+ enabled = "enabled"
+ resetting = "resetting"
+ disabling = "disabling"
+ disabled = "disabled"
+ modifying = "modifying"
+
+
+class RouteServerPropagationState(StrEnum):
+ pending = "pending"
+ available = "available"
+ deleting = "deleting"
+
+
+class RouteServerRouteInstallationStatus(StrEnum):
+ installed = "installed"
+ rejected = "rejected"
+
+
+class RouteServerRouteStatus(StrEnum):
+ in_rib = "in-rib"
+ in_fib = "in-fib"
+
+
+class RouteServerState(StrEnum):
+ pending = "pending"
+ available = "available"
+ modifying = "modifying"
+ deleting = "deleting"
+ deleted = "deleted"
+
+
class RouteState(StrEnum):
active = "active"
blackhole = "blackhole"
@@ -3085,6 +3176,7 @@ class ServiceConnectivityType(StrEnum):
class ServiceManaged(StrEnum):
alb = "alb"
nlb = "nlb"
+ rnat = "rnat"
class ServiceState(StrEnum):
@@ -3632,6 +3724,8 @@ class VpcEncryptionControlState(StrEnum):
deleting = "deleting"
deleted = "deleted"
available = "available"
+ creating = "creating"
+ delete_failed = "delete-failed"
class VpcEndpointType(StrEnum):
@@ -4527,6 +4621,7 @@ class ApplySecurityGroupsToClientVpnTargetNetworkResult(TypedDict, total=False):
ArchitectureTypeList = List[ArchitectureType]
ArchitectureTypeSet = List[ArchitectureType]
ArnList = List[ResourceArn]
+AsPath = List[String]
class AsnAuthorizationContext(TypedDict, total=False):
@@ -4793,6 +4888,22 @@ class AssociateNatGatewayAddressResult(TypedDict, total=False):
NatGatewayAddresses: Optional[NatGatewayAddressList]
+class AssociateRouteServerRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ VpcId: VpcId
+ DryRun: Optional[Boolean]
+
+
+class RouteServerAssociation(TypedDict, total=False):
+ RouteServerId: Optional[RouteServerId]
+ VpcId: Optional[VpcId]
+ State: Optional[RouteServerAssociationState]
+
+
+class AssociateRouteServerResult(TypedDict, total=False):
+ RouteServerAssociation: Optional[RouteServerAssociation]
+
+
class AssociateRouteTableRequest(ServiceRequest):
GatewayId: Optional[RouteGatewayId]
DryRun: Optional[Boolean]
@@ -5459,6 +5570,7 @@ class BlockPublicAccessStates(TypedDict, total=False):
BootModeTypeList = List[BootModeType]
+BoxedLong = int
BundleIdStringList = List[BundleId]
@@ -5998,6 +6110,14 @@ class ClientLoginBannerResponseOptions(TypedDict, total=False):
BannerText: Optional[String]
+class ClientRouteEnforcementOptions(TypedDict, total=False):
+ Enforced: Optional[Boolean]
+
+
+class ClientRouteEnforcementResponseOptions(TypedDict, total=False):
+ Enforced: Optional[Boolean]
+
+
class FederatedAuthentication(TypedDict, total=False):
SamlProviderArn: Optional[String]
SelfServiceSamlProviderArn: Optional[String]
@@ -6096,6 +6216,7 @@ class ClientVpnEndpoint(TypedDict, total=False):
ClientConnectOptions: Optional[ClientConnectResponseOptions]
SessionTimeoutHours: Optional[Integer]
ClientLoginBannerOptions: Optional[ClientLoginBannerResponseOptions]
+ ClientRouteEnforcementOptions: Optional[ClientRouteEnforcementResponseOptions]
DisconnectOnSessionTimeout: Optional[Boolean]
@@ -6445,6 +6566,7 @@ class CreateClientVpnEndpointRequest(ServiceRequest):
ClientConnectOptions: Optional[ClientConnectOptions]
SessionTimeoutHours: Optional[Integer]
ClientLoginBannerOptions: Optional[ClientLoginBannerOptions]
+ ClientRouteEnforcementOptions: Optional[ClientRouteEnforcementOptions]
DisconnectOnSessionTimeout: Optional[Boolean]
@@ -6895,7 +7017,7 @@ class FleetLaunchTemplateOverridesRequest(TypedDict, total=False):
Placement: Optional[Placement]
BlockDeviceMappings: Optional[FleetBlockDeviceMappingRequestList]
InstanceRequirements: Optional[InstanceRequirementsRequest]
- ImageId: Optional[String]
+ ImageId: Optional[ImageId]
FleetLaunchTemplateOverridesListRequest = List[FleetLaunchTemplateOverridesRequest]
@@ -7265,6 +7387,7 @@ class CreateIpamRequest(ServiceRequest):
ClientToken: Optional[String]
Tier: Optional[IpamTier]
EnablePrivateGua: Optional[Boolean]
+ MeteredAccount: Optional[IpamMeteredAccount]
class CreateIpamResourceDiscoveryRequest(ServiceRequest):
@@ -7324,6 +7447,7 @@ class Ipam(TypedDict, total=False):
StateMessage: Optional[String]
Tier: Optional[IpamTier]
EnablePrivateGua: Optional[Boolean]
+ MeteredAccount: Optional[IpamMeteredAccount]
class CreateIpamResult(TypedDict, total=False):
@@ -8477,6 +8601,102 @@ class CreateRouteResult(TypedDict, total=False):
Return: Optional[Boolean]
+class CreateRouteServerEndpointRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ SubnetId: SubnetId
+ ClientToken: Optional[String]
+ DryRun: Optional[Boolean]
+ TagSpecifications: Optional[TagSpecificationList]
+
+
+class RouteServerEndpoint(TypedDict, total=False):
+ RouteServerId: Optional[RouteServerId]
+ RouteServerEndpointId: Optional[RouteServerEndpointId]
+ VpcId: Optional[VpcId]
+ SubnetId: Optional[SubnetId]
+ EniId: Optional[NetworkInterfaceId]
+ EniAddress: Optional[String]
+ State: Optional[RouteServerEndpointState]
+ FailureReason: Optional[String]
+ Tags: Optional[TagList]
+
+
+class CreateRouteServerEndpointResult(TypedDict, total=False):
+ RouteServerEndpoint: Optional[RouteServerEndpoint]
+
+
+class RouteServerBgpOptionsRequest(TypedDict, total=False):
+ PeerAsn: Long
+ PeerLivenessDetection: Optional[RouteServerPeerLivenessMode]
+
+
+class CreateRouteServerPeerRequest(ServiceRequest):
+ RouteServerEndpointId: RouteServerEndpointId
+ PeerAddress: String
+ BgpOptions: RouteServerBgpOptionsRequest
+ DryRun: Optional[Boolean]
+ TagSpecifications: Optional[TagSpecificationList]
+
+
+class RouteServerBfdStatus(TypedDict, total=False):
+ Status: Optional[RouteServerBfdState]
+
+
+class RouteServerBgpStatus(TypedDict, total=False):
+ Status: Optional[RouteServerBgpState]
+
+
+class RouteServerBgpOptions(TypedDict, total=False):
+ PeerAsn: Optional[Long]
+ PeerLivenessDetection: Optional[RouteServerPeerLivenessMode]
+
+
+class RouteServerPeer(TypedDict, total=False):
+ RouteServerPeerId: Optional[RouteServerPeerId]
+ RouteServerEndpointId: Optional[RouteServerEndpointId]
+ RouteServerId: Optional[RouteServerId]
+ VpcId: Optional[VpcId]
+ SubnetId: Optional[SubnetId]
+ State: Optional[RouteServerPeerState]
+ FailureReason: Optional[String]
+ EndpointEniId: Optional[NetworkInterfaceId]
+ EndpointEniAddress: Optional[String]
+ PeerAddress: Optional[String]
+ BgpOptions: Optional[RouteServerBgpOptions]
+ BgpStatus: Optional[RouteServerBgpStatus]
+ BfdStatus: Optional[RouteServerBfdStatus]
+ Tags: Optional[TagList]
+
+
+class CreateRouteServerPeerResult(TypedDict, total=False):
+ RouteServerPeer: Optional[RouteServerPeer]
+
+
+class CreateRouteServerRequest(ServiceRequest):
+ AmazonSideAsn: Long
+ ClientToken: Optional[String]
+ DryRun: Optional[Boolean]
+ PersistRoutes: Optional[RouteServerPersistRoutesAction]
+ PersistRoutesDuration: Optional[BoxedLong]
+ SnsNotificationsEnabled: Optional[Boolean]
+ TagSpecifications: Optional[TagSpecificationList]
+
+
+class RouteServer(TypedDict, total=False):
+ RouteServerId: Optional[RouteServerId]
+ AmazonSideAsn: Optional[Long]
+ State: Optional[RouteServerState]
+ Tags: Optional[TagList]
+ PersistRoutesState: Optional[RouteServerPersistRoutesState]
+ PersistRoutesDuration: Optional[BoxedLong]
+ SnsNotificationsEnabled: Optional[Boolean]
+ SnsTopicArn: Optional[String]
+
+
+class CreateRouteServerResult(TypedDict, total=False):
+ RouteServer: Optional[RouteServer]
+
+
class CreateRouteTableRequest(ServiceRequest):
TagSpecifications: Optional[TagSpecificationList]
ClientToken: Optional[String]
@@ -10412,6 +10632,33 @@ class DeleteRouteRequest(ServiceRequest):
DestinationIpv6CidrBlock: Optional[String]
+class DeleteRouteServerEndpointRequest(ServiceRequest):
+ RouteServerEndpointId: RouteServerEndpointId
+ DryRun: Optional[Boolean]
+
+
+class DeleteRouteServerEndpointResult(TypedDict, total=False):
+ RouteServerEndpoint: Optional[RouteServerEndpoint]
+
+
+class DeleteRouteServerPeerRequest(ServiceRequest):
+ RouteServerPeerId: RouteServerPeerId
+ DryRun: Optional[Boolean]
+
+
+class DeleteRouteServerPeerResult(TypedDict, total=False):
+ RouteServerPeer: Optional[RouteServerPeer]
+
+
+class DeleteRouteServerRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ DryRun: Optional[Boolean]
+
+
+class DeleteRouteServerResult(TypedDict, total=False):
+ RouteServer: Optional[RouteServer]
+
+
class DeleteRouteTableRequest(ServiceRequest):
DryRun: Optional[Boolean]
RouteTableId: RouteTableId
@@ -13748,6 +13995,63 @@ class DescribeReservedInstancesResult(TypedDict, total=False):
ReservedInstances: Optional[ReservedInstancesList]
+RouteServerEndpointIdsList = List[RouteServerEndpointId]
+
+
+class DescribeRouteServerEndpointsRequest(ServiceRequest):
+ RouteServerEndpointIds: Optional[RouteServerEndpointIdsList]
+ NextToken: Optional[String]
+ MaxResults: Optional[RouteServerMaxResults]
+ Filters: Optional[FilterList]
+ DryRun: Optional[Boolean]
+
+
+RouteServerEndpointsList = List[RouteServerEndpoint]
+
+
+class DescribeRouteServerEndpointsResult(TypedDict, total=False):
+ RouteServerEndpoints: Optional[RouteServerEndpointsList]
+ NextToken: Optional[String]
+
+
+RouteServerPeerIdsList = List[RouteServerPeerId]
+
+
+class DescribeRouteServerPeersRequest(ServiceRequest):
+ RouteServerPeerIds: Optional[RouteServerPeerIdsList]
+ NextToken: Optional[String]
+ MaxResults: Optional[RouteServerMaxResults]
+ Filters: Optional[FilterList]
+ DryRun: Optional[Boolean]
+
+
+RouteServerPeersList = List[RouteServerPeer]
+
+
+class DescribeRouteServerPeersResult(TypedDict, total=False):
+ RouteServerPeers: Optional[RouteServerPeersList]
+ NextToken: Optional[String]
+
+
+RouteServerIdsList = List[RouteServerId]
+
+
+class DescribeRouteServersRequest(ServiceRequest):
+ RouteServerIds: Optional[RouteServerIdsList]
+ NextToken: Optional[String]
+ MaxResults: Optional[RouteServerMaxResults]
+ Filters: Optional[FilterList]
+ DryRun: Optional[Boolean]
+
+
+RouteServersList = List[RouteServer]
+
+
+class DescribeRouteServersResult(TypedDict, total=False):
+ RouteServers: Optional[RouteServersList]
+ NextToken: Optional[String]
+
+
RouteTableIdStringList = List[RouteTableId]
@@ -15593,6 +15897,22 @@ class DisableIpamOrganizationAdminAccountResult(TypedDict, total=False):
Success: Optional[Boolean]
+class DisableRouteServerPropagationRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ RouteTableId: RouteTableId
+ DryRun: Optional[Boolean]
+
+
+class RouteServerPropagation(TypedDict, total=False):
+ RouteServerId: Optional[RouteServerId]
+ RouteTableId: Optional[RouteTableId]
+ State: Optional[RouteServerPropagationState]
+
+
+class DisableRouteServerPropagationResult(TypedDict, total=False):
+ RouteServerPropagation: Optional[RouteServerPropagation]
+
+
class DisableSerialConsoleAccessRequest(ServiceRequest):
DryRun: Optional[Boolean]
@@ -15747,6 +16067,16 @@ class DisassociateNatGatewayAddressResult(TypedDict, total=False):
NatGatewayAddresses: Optional[NatGatewayAddressList]
+class DisassociateRouteServerRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ VpcId: VpcId
+ DryRun: Optional[Boolean]
+
+
+class DisassociateRouteServerResult(TypedDict, total=False):
+ RouteServerAssociation: Optional[RouteServerAssociation]
+
+
class DisassociateRouteTableRequest(ServiceRequest):
DryRun: Optional[Boolean]
AssociationId: RouteTableAssociationId
@@ -16037,6 +16367,16 @@ class EnableReachabilityAnalyzerOrganizationSharingResult(TypedDict, total=False
ReturnValue: Optional[Boolean]
+class EnableRouteServerPropagationRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ RouteTableId: RouteTableId
+ DryRun: Optional[Boolean]
+
+
+class EnableRouteServerPropagationResult(TypedDict, total=False):
+ RouteServerPropagation: Optional[RouteServerPropagation]
+
+
class EnableSerialConsoleAccessRequest(ServiceRequest):
DryRun: Optional[Boolean]
@@ -16866,6 +17206,68 @@ class GetReservedInstancesExchangeQuoteResult(TypedDict, total=False):
ValidationFailureReason: Optional[String]
+class GetRouteServerAssociationsRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ DryRun: Optional[Boolean]
+
+
+RouteServerAssociationsList = List[RouteServerAssociation]
+
+
+class GetRouteServerAssociationsResult(TypedDict, total=False):
+ RouteServerAssociations: Optional[RouteServerAssociationsList]
+
+
+class GetRouteServerPropagationsRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ RouteTableId: Optional[RouteTableId]
+ DryRun: Optional[Boolean]
+
+
+RouteServerPropagationsList = List[RouteServerPropagation]
+
+
+class GetRouteServerPropagationsResult(TypedDict, total=False):
+ RouteServerPropagations: Optional[RouteServerPropagationsList]
+
+
+class GetRouteServerRoutingDatabaseRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ NextToken: Optional[String]
+ MaxResults: Optional[RouteServerMaxResults]
+ DryRun: Optional[Boolean]
+ Filters: Optional[FilterList]
+
+
+class RouteServerRouteInstallationDetail(TypedDict, total=False):
+ RouteTableId: Optional[RouteTableId]
+ RouteInstallationStatus: Optional[RouteServerRouteInstallationStatus]
+ RouteInstallationStatusReason: Optional[String]
+
+
+RouteServerRouteInstallationDetails = List[RouteServerRouteInstallationDetail]
+
+
+class RouteServerRoute(TypedDict, total=False):
+ RouteServerEndpointId: Optional[RouteServerEndpointId]
+ RouteServerPeerId: Optional[RouteServerPeerId]
+ RouteInstallationDetails: Optional[RouteServerRouteInstallationDetails]
+ RouteStatus: Optional[RouteServerRouteStatus]
+ Prefix: Optional[String]
+ AsPaths: Optional[AsPath]
+ Med: Optional[Integer]
+ NextHopIp: Optional[String]
+
+
+RouteServerRouteList = List[RouteServerRoute]
+
+
+class GetRouteServerRoutingDatabaseResult(TypedDict, total=False):
+ AreRoutesPersisted: Optional[Boolean]
+ Routes: Optional[RouteServerRouteList]
+ NextToken: Optional[String]
+
+
class GetSecurityGroupsForVpcRequest(ServiceRequest):
VpcId: VpcId
NextToken: Optional[String]
@@ -17670,6 +18072,7 @@ class ModifyClientVpnEndpointRequest(ServiceRequest):
ClientConnectOptions: Optional[ClientConnectOptions]
SessionTimeoutHours: Optional[Integer]
ClientLoginBannerOptions: Optional[ClientLoginBannerOptions]
+ ClientRouteEnforcementOptions: Optional[ClientRouteEnforcementOptions]
DisconnectOnSessionTimeout: Optional[Boolean]
@@ -17972,6 +18375,7 @@ class ModifyIpamRequest(ServiceRequest):
RemoveOperatingRegions: Optional[RemoveIpamOperatingRegionSet]
Tier: Optional[IpamTier]
EnablePrivateGua: Optional[Boolean]
+ MeteredAccount: Optional[IpamMeteredAccount]
class ModifyIpamResourceCidrRequest(ServiceRequest):
@@ -18112,6 +18516,18 @@ class ModifyReservedInstancesResult(TypedDict, total=False):
ReservedInstancesModificationId: Optional[String]
+class ModifyRouteServerRequest(ServiceRequest):
+ RouteServerId: RouteServerId
+ PersistRoutes: Optional[RouteServerPersistRoutesAction]
+ PersistRoutesDuration: Optional[BoxedLong]
+ SnsNotificationsEnabled: Optional[Boolean]
+ DryRun: Optional[Boolean]
+
+
+class ModifyRouteServerResult(TypedDict, total=False):
+ RouteServer: Optional[RouteServer]
+
+
class SecurityGroupRuleRequest(TypedDict, total=False):
IpProtocol: Optional[String]
FromPort: Optional[Integer]
@@ -20095,6 +20511,17 @@ def associate_nat_gateway_address(
) -> AssociateNatGatewayAddressResult:
raise NotImplementedError
+ @handler("AssociateRouteServer")
+ def associate_route_server(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ vpc_id: VpcId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> AssociateRouteServerResult:
+ raise NotImplementedError
+
@handler("AssociateRouteTable")
def associate_route_table(
self,
@@ -20586,6 +21013,7 @@ def create_client_vpn_endpoint(
client_connect_options: ClientConnectOptions = None,
session_timeout_hours: Integer = None,
client_login_banner_options: ClientLoginBannerOptions = None,
+ client_route_enforcement_options: ClientRouteEnforcementOptions = None,
disconnect_on_session_timeout: Boolean = None,
**kwargs,
) -> CreateClientVpnEndpointResult:
@@ -20792,6 +21220,7 @@ def create_ipam(
client_token: String = None,
tier: IpamTier = None,
enable_private_gua: Boolean = None,
+ metered_account: IpamMeteredAccount = None,
**kwargs,
) -> CreateIpamResult:
raise NotImplementedError
@@ -21177,6 +21606,47 @@ def create_route(
) -> CreateRouteResult:
raise NotImplementedError
+ @handler("CreateRouteServer")
+ def create_route_server(
+ self,
+ context: RequestContext,
+ amazon_side_asn: Long,
+ client_token: String = None,
+ dry_run: Boolean = None,
+ persist_routes: RouteServerPersistRoutesAction = None,
+ persist_routes_duration: BoxedLong = None,
+ sns_notifications_enabled: Boolean = None,
+ tag_specifications: TagSpecificationList = None,
+ **kwargs,
+ ) -> CreateRouteServerResult:
+ raise NotImplementedError
+
+ @handler("CreateRouteServerEndpoint")
+ def create_route_server_endpoint(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ subnet_id: SubnetId,
+ client_token: String = None,
+ dry_run: Boolean = None,
+ tag_specifications: TagSpecificationList = None,
+ **kwargs,
+ ) -> CreateRouteServerEndpointResult:
+ raise NotImplementedError
+
+ @handler("CreateRouteServerPeer")
+ def create_route_server_peer(
+ self,
+ context: RequestContext,
+ route_server_endpoint_id: RouteServerEndpointId,
+ peer_address: String,
+ bgp_options: RouteServerBgpOptionsRequest,
+ dry_run: Boolean = None,
+ tag_specifications: TagSpecificationList = None,
+ **kwargs,
+ ) -> CreateRouteServerPeerResult:
+ raise NotImplementedError
+
@handler("CreateRouteTable")
def create_route_table(
self,
@@ -22139,6 +22609,36 @@ def delete_route(
) -> None:
raise NotImplementedError
+ @handler("DeleteRouteServer")
+ def delete_route_server(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DeleteRouteServerResult:
+ raise NotImplementedError
+
+ @handler("DeleteRouteServerEndpoint")
+ def delete_route_server_endpoint(
+ self,
+ context: RequestContext,
+ route_server_endpoint_id: RouteServerEndpointId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DeleteRouteServerEndpointResult:
+ raise NotImplementedError
+
+ @handler("DeleteRouteServerPeer")
+ def delete_route_server_peer(
+ self,
+ context: RequestContext,
+ route_server_peer_id: RouteServerPeerId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DeleteRouteServerPeerResult:
+ raise NotImplementedError
+
@handler("DeleteRouteTable")
def delete_route_table(
self,
@@ -23844,6 +24344,45 @@ def describe_reserved_instances_offerings(
) -> DescribeReservedInstancesOfferingsResult:
raise NotImplementedError
+ @handler("DescribeRouteServerEndpoints")
+ def describe_route_server_endpoints(
+ self,
+ context: RequestContext,
+ route_server_endpoint_ids: RouteServerEndpointIdsList = None,
+ next_token: String = None,
+ max_results: RouteServerMaxResults = None,
+ filters: FilterList = None,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DescribeRouteServerEndpointsResult:
+ raise NotImplementedError
+
+ @handler("DescribeRouteServerPeers")
+ def describe_route_server_peers(
+ self,
+ context: RequestContext,
+ route_server_peer_ids: RouteServerPeerIdsList = None,
+ next_token: String = None,
+ max_results: RouteServerMaxResults = None,
+ filters: FilterList = None,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DescribeRouteServerPeersResult:
+ raise NotImplementedError
+
+ @handler("DescribeRouteServers")
+ def describe_route_servers(
+ self,
+ context: RequestContext,
+ route_server_ids: RouteServerIdsList = None,
+ next_token: String = None,
+ max_results: RouteServerMaxResults = None,
+ filters: FilterList = None,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DescribeRouteServersResult:
+ raise NotImplementedError
+
@handler("DescribeRouteTables")
def describe_route_tables(
self,
@@ -24759,6 +25298,17 @@ def disable_ipam_organization_admin_account(
) -> DisableIpamOrganizationAdminAccountResult:
raise NotImplementedError
+ @handler("DisableRouteServerPropagation")
+ def disable_route_server_propagation(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ route_table_id: RouteTableId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DisableRouteServerPropagationResult:
+ raise NotImplementedError
+
@handler("DisableSerialConsoleAccess")
def disable_serial_console_access(
self, context: RequestContext, dry_run: Boolean = None, **kwargs
@@ -24895,6 +25445,17 @@ def disassociate_nat_gateway_address(
) -> DisassociateNatGatewayAddressResult:
raise NotImplementedError
+ @handler("DisassociateRouteServer")
+ def disassociate_route_server(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ vpc_id: VpcId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> DisassociateRouteServerResult:
+ raise NotImplementedError
+
@handler("DisassociateRouteTable")
def disassociate_route_table(
self,
@@ -25092,6 +25653,17 @@ def enable_reachability_analyzer_organization_sharing(
) -> EnableReachabilityAnalyzerOrganizationSharingResult:
raise NotImplementedError
+ @handler("EnableRouteServerPropagation")
+ def enable_route_server_propagation(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ route_table_id: RouteTableId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> EnableRouteServerPropagationResult:
+ raise NotImplementedError
+
@handler("EnableSerialConsoleAccess")
def enable_serial_console_access(
self, context: RequestContext, dry_run: Boolean = None, **kwargs
@@ -25579,6 +26151,40 @@ def get_reserved_instances_exchange_quote(
) -> GetReservedInstancesExchangeQuoteResult:
raise NotImplementedError
+ @handler("GetRouteServerAssociations")
+ def get_route_server_associations(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> GetRouteServerAssociationsResult:
+ raise NotImplementedError
+
+ @handler("GetRouteServerPropagations")
+ def get_route_server_propagations(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ route_table_id: RouteTableId = None,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> GetRouteServerPropagationsResult:
+ raise NotImplementedError
+
+ @handler("GetRouteServerRoutingDatabase")
+ def get_route_server_routing_database(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ next_token: String = None,
+ max_results: RouteServerMaxResults = None,
+ dry_run: Boolean = None,
+ filters: FilterList = None,
+ **kwargs,
+ ) -> GetRouteServerRoutingDatabaseResult:
+ raise NotImplementedError
+
@handler("GetSecurityGroupsForVpc")
def get_security_groups_for_vpc(
self,
@@ -25988,6 +26594,7 @@ def modify_client_vpn_endpoint(
client_connect_options: ClientConnectOptions = None,
session_timeout_hours: Integer = None,
client_login_banner_options: ClientLoginBannerOptions = None,
+ client_route_enforcement_options: ClientRouteEnforcementOptions = None,
disconnect_on_session_timeout: Boolean = None,
**kwargs,
) -> ModifyClientVpnEndpointResult:
@@ -26247,6 +26854,7 @@ def modify_ipam(
remove_operating_regions: RemoveIpamOperatingRegionSet = None,
tier: IpamTier = None,
enable_private_gua: Boolean = None,
+ metered_account: IpamMeteredAccount = None,
**kwargs,
) -> ModifyIpamResult:
raise NotImplementedError
@@ -26394,6 +27002,19 @@ def modify_reserved_instances(
) -> ModifyReservedInstancesResult:
raise NotImplementedError
+ @handler("ModifyRouteServer")
+ def modify_route_server(
+ self,
+ context: RequestContext,
+ route_server_id: RouteServerId,
+ persist_routes: RouteServerPersistRoutesAction = None,
+ persist_routes_duration: BoxedLong = None,
+ sns_notifications_enabled: Boolean = None,
+ dry_run: Boolean = None,
+ **kwargs,
+ ) -> ModifyRouteServerResult:
+ raise NotImplementedError
+
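
The EC2 block is by far the largest: it adds the complete route server model (servers, endpoints, BGP peers, VPC associations, route-table propagation, plus the routing-database query). A condensed lifecycle sketch with boto3; the VPC, subnet, and route-table IDs are placeholders, and the ASNs are arbitrary private values:

```python
import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566")
vpc_id, subnet_id, rtb_id = "vpc-123", "subnet-123", "rtb-123"  # placeholders

rs = ec2.create_route_server(AmazonSideAsn=65000)["RouteServer"]
ec2.associate_route_server(RouteServerId=rs["RouteServerId"], VpcId=vpc_id)

endpoint = ec2.create_route_server_endpoint(
    RouteServerId=rs["RouteServerId"], SubnetId=subnet_id
)["RouteServerEndpoint"]

# A peer speaks BGP to the endpoint's ENI; liveness detection values come
# from the RouteServerPeerLivenessMode enum above.
ec2.create_route_server_peer(
    RouteServerEndpointId=endpoint["RouteServerEndpointId"],
    PeerAddress="10.0.0.42",
    BgpOptions={"PeerAsn": 65001, "PeerLivenessDetection": "bgp-keepalive"},
)

ec2.enable_route_server_propagation(RouteServerId=rs["RouteServerId"], RouteTableId=rtb_id)
```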
@handler("ModifySecurityGroupRules")
def modify_security_group_rules(
self,
diff --git a/localstack-core/localstack/aws/api/events/__init__.py b/localstack-core/localstack/aws/api/events/__init__.py
index e1a17b290b1be..680a3e1ef3328 100644
--- a/localstack-core/localstack/aws/api/events/__init__.py
+++ b/localstack-core/localstack/aws/api/events/__init__.py
@@ -36,6 +36,7 @@
EndpointUrl = str
ErrorCode = str
ErrorMessage = str
+EventBusArn = str
EventBusDescription = str
EventBusName = str
EventBusNameOrArn = str
@@ -329,7 +330,7 @@ class AppSyncParameters(TypedDict, total=False):
class Archive(TypedDict, total=False):
ArchiveName: Optional[ArchiveName]
- EventSourceArn: Optional[Arn]
+ EventSourceArn: Optional[EventBusArn]
State: Optional[ArchiveState]
StateReason: Optional[ArchiveStateReason]
RetentionDays: Optional[RetentionDays]
@@ -497,10 +498,11 @@ class CreateApiDestinationResponse(TypedDict, total=False):
class CreateArchiveRequest(ServiceRequest):
ArchiveName: ArchiveName
- EventSourceArn: Arn
+ EventSourceArn: EventBusArn
Description: Optional[ArchiveDescription]
EventPattern: Optional[EventPattern]
RetentionDays: Optional[RetentionDays]
+ KmsKeyIdentifier: Optional[KmsKeyIdentifier]
class CreateArchiveResponse(TypedDict, total=False):
@@ -546,6 +548,7 @@ class CreateConnectionRequest(ServiceRequest):
AuthorizationType: ConnectionAuthorizationType
AuthParameters: CreateConnectionAuthRequestParameters
InvocationConnectivityParameters: Optional[ConnectivityResourceParameters]
+ KmsKeyIdentifier: Optional[KmsKeyIdentifier]
class CreateConnectionResponse(TypedDict, total=False):
@@ -730,11 +733,12 @@ class DescribeArchiveRequest(ServiceRequest):
class DescribeArchiveResponse(TypedDict, total=False):
ArchiveArn: Optional[ArchiveArn]
ArchiveName: Optional[ArchiveName]
- EventSourceArn: Optional[Arn]
+ EventSourceArn: Optional[EventBusArn]
Description: Optional[ArchiveDescription]
EventPattern: Optional[EventPattern]
State: Optional[ArchiveState]
StateReason: Optional[ArchiveStateReason]
+ KmsKeyIdentifier: Optional[KmsKeyIdentifier]
RetentionDays: Optional[RetentionDays]
SizeBytes: Optional[Long]
EventCount: Optional[Long]
@@ -754,6 +758,7 @@ class DescribeConnectionResponse(TypedDict, total=False):
StateReason: Optional[ConnectionStateReason]
AuthorizationType: Optional[ConnectionAuthorizationType]
SecretArn: Optional[SecretsManagerSecretArn]
+ KmsKeyIdentifier: Optional[KmsKeyIdentifier]
AuthParameters: Optional[ConnectionAuthResponseParameters]
CreationTime: Optional[Timestamp]
LastModifiedTime: Optional[Timestamp]
@@ -836,7 +841,7 @@ class DescribeReplayResponse(TypedDict, total=False):
Description: Optional[ReplayDescription]
State: Optional[ReplayState]
StateReason: Optional[ReplayStateReason]
- EventSourceArn: Optional[Arn]
+ EventSourceArn: Optional[ArchiveArn]
Destination: Optional[ReplayDestination]
EventStartTime: Optional[Timestamp]
EventEndTime: Optional[Timestamp]
@@ -994,7 +999,7 @@ class ListApiDestinationsResponse(TypedDict, total=False):
class ListArchivesRequest(ServiceRequest):
NamePrefix: Optional[ArchiveName]
- EventSourceArn: Optional[Arn]
+ EventSourceArn: Optional[EventBusArn]
State: Optional[ArchiveState]
NextToken: Optional[NextToken]
Limit: Optional[LimitMax100]
@@ -1094,14 +1099,14 @@ class ListPartnerEventSourcesResponse(TypedDict, total=False):
class ListReplaysRequest(ServiceRequest):
NamePrefix: Optional[ReplayName]
State: Optional[ReplayState]
- EventSourceArn: Optional[Arn]
+ EventSourceArn: Optional[ArchiveArn]
NextToken: Optional[NextToken]
Limit: Optional[LimitMax100]
class Replay(TypedDict, total=False):
ReplayName: Optional[ReplayName]
- EventSourceArn: Optional[Arn]
+ EventSourceArn: Optional[ArchiveArn]
State: Optional[ReplayState]
StateReason: Optional[ReplayStateReason]
EventStartTime: Optional[Timestamp]
@@ -1391,7 +1396,7 @@ class RemoveTargetsResponse(TypedDict, total=False):
class StartReplayRequest(ServiceRequest):
ReplayName: ReplayName
Description: Optional[ReplayDescription]
- EventSourceArn: Arn
+ EventSourceArn: ArchiveArn
EventStartTime: Timestamp
EventEndTime: Timestamp
Destination: ReplayDestination
@@ -1455,6 +1460,7 @@ class UpdateArchiveRequest(ServiceRequest):
Description: Optional[ArchiveDescription]
EventPattern: Optional[EventPattern]
RetentionDays: Optional[RetentionDays]
+ KmsKeyIdentifier: Optional[KmsKeyIdentifier]
class UpdateArchiveResponse(TypedDict, total=False):
@@ -1500,6 +1506,7 @@ class UpdateConnectionRequest(ServiceRequest):
AuthorizationType: Optional[ConnectionAuthorizationType]
AuthParameters: Optional[UpdateConnectionAuthRequestParameters]
InvocationConnectivityParameters: Optional[ConnectivityResourceParameters]
+ KmsKeyIdentifier: Optional[KmsKeyIdentifier]
class UpdateConnectionResponse(TypedDict, total=False):
@@ -1581,10 +1588,11 @@ def create_archive(
self,
context: RequestContext,
archive_name: ArchiveName,
- event_source_arn: Arn,
+ event_source_arn: EventBusArn,
description: ArchiveDescription = None,
event_pattern: EventPattern = None,
retention_days: RetentionDays = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> CreateArchiveResponse:
raise NotImplementedError
@@ -1598,6 +1606,7 @@ def create_connection(
auth_parameters: CreateConnectionAuthRequestParameters,
description: ConnectionDescription = None,
invocation_connectivity_parameters: ConnectivityResourceParameters = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> CreateConnectionResponse:
raise NotImplementedError
@@ -1788,7 +1797,7 @@ def list_archives(
self,
context: RequestContext,
name_prefix: ArchiveName = None,
- event_source_arn: Arn = None,
+ event_source_arn: EventBusArn = None,
state: ArchiveState = None,
next_token: NextToken = None,
limit: LimitMax100 = None,
@@ -1870,7 +1879,7 @@ def list_replays(
context: RequestContext,
name_prefix: ReplayName = None,
state: ReplayState = None,
- event_source_arn: Arn = None,
+ event_source_arn: ArchiveArn = None,
next_token: NextToken = None,
limit: LimitMax100 = None,
**kwargs,
@@ -2004,7 +2013,7 @@ def start_replay(
self,
context: RequestContext,
replay_name: ReplayName,
- event_source_arn: Arn,
+ event_source_arn: ArchiveArn,
event_start_time: Timestamp,
event_end_time: Timestamp,
destination: ReplayDestination,
@@ -2053,6 +2062,7 @@ def update_archive(
description: ArchiveDescription = None,
event_pattern: EventPattern = None,
retention_days: RetentionDays = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> UpdateArchiveResponse:
raise NotImplementedError
@@ -2066,6 +2076,7 @@ def update_connection(
authorization_type: ConnectionAuthorizationType = None,
auth_parameters: UpdateConnectionAuthRequestParameters = None,
invocation_connectivity_parameters: ConnectivityResourceParameters = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> UpdateConnectionResponse:
raise NotImplementedError
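
On the EventBridge side there are two themes: `KmsKeyIdentifier` lands on archives and connections, and the loose `Arn` aliases are narrowed to `EventBusArn`/`ArchiveArn` (a documentation-level change, since all of them alias `str`). A short boto3 sketch of an encrypted archive; the key alias is a placeholder:

```python
import boto3

events = boto3.client("events", endpoint_url="http://localhost:4566")

bus_arn = events.describe_event_bus(Name="default")["Arn"]
events.create_archive(
    ArchiveName="orders-archive",
    EventSourceArn=bus_arn,  # now typed specifically as an event bus ARN
    RetentionDays=7,
    KmsKeyIdentifier="alias/my-events-key",  # placeholder customer-managed key
)
```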
diff --git a/localstack-core/localstack/aws/api/kinesis/__init__.py b/localstack-core/localstack/aws/api/kinesis/__init__.py
index 515ac108c7dba..738a00f12cad1 100644
--- a/localstack-core/localstack/aws/api/kinesis/__init__.py
+++ b/localstack-core/localstack/aws/api/kinesis/__init__.py
@@ -494,11 +494,8 @@ class ListStreamsOutput(TypedDict, total=False):
StreamSummaries: Optional[StreamSummaryList]
-class ListTagsForStreamInput(ServiceRequest):
- StreamName: Optional[StreamName]
- ExclusiveStartTagKey: Optional[TagKey]
- Limit: Optional[ListTagsForStreamInputLimit]
- StreamARN: Optional[StreamARN]
+class ListTagsForResourceInput(ServiceRequest):
+ ResourceARN: ResourceARN
class Tag(TypedDict, total=False):
@@ -509,6 +506,17 @@ class Tag(TypedDict, total=False):
TagList = List[Tag]
+class ListTagsForResourceOutput(TypedDict, total=False):
+ Tags: Optional[TagList]
+
+
+class ListTagsForStreamInput(ServiceRequest):
+ StreamName: Optional[StreamName]
+ ExclusiveStartTagKey: Optional[TagKey]
+ Limit: Optional[ListTagsForStreamInputLimit]
+ StreamARN: Optional[StreamARN]
+
+
class ListTagsForStreamOutput(TypedDict, total=False):
Tags: TagList
HasMoreTags: BooleanObject
@@ -575,6 +583,7 @@ class PutResourcePolicyInput(ServiceRequest):
class RegisterStreamConsumerInput(ServiceRequest):
StreamARN: StreamARN
ConsumerName: ConsumerName
+ Tags: Optional[TagMap]
class RegisterStreamConsumerOutput(TypedDict, total=False):
@@ -647,6 +656,16 @@ class SubscribeToShardOutput(TypedDict, total=False):
EventStream: Iterator[SubscribeToShardEventStream]
+class TagResourceInput(ServiceRequest):
+ Tags: TagMap
+ ResourceARN: ResourceARN
+
+
+class UntagResourceInput(ServiceRequest):
+ TagKeys: TagKeyList
+ ResourceARN: ResourceARN
+
+
class UpdateShardCountInput(ServiceRequest):
StreamName: Optional[StreamName]
TargetShardCount: PositiveIntegerObject
@@ -871,6 +890,12 @@ def list_streams(
) -> ListStreamsOutput:
raise NotImplementedError
+ @handler("ListTagsForResource")
+ def list_tags_for_resource(
+ self, context: RequestContext, resource_arn: ResourceARN, **kwargs
+ ) -> ListTagsForResourceOutput:
+ raise NotImplementedError
+
@handler("ListTagsForStream")
def list_tags_for_stream(
self,
@@ -928,7 +953,12 @@ def put_resource_policy(
@handler("RegisterStreamConsumer")
def register_stream_consumer(
- self, context: RequestContext, stream_arn: StreamARN, consumer_name: ConsumerName, **kwargs
+ self,
+ context: RequestContext,
+ stream_arn: StreamARN,
+ consumer_name: ConsumerName,
+ tags: TagMap = None,
+ **kwargs,
) -> RegisterStreamConsumerOutput:
raise NotImplementedError
@@ -990,6 +1020,18 @@ def subscribe_to_shard(
) -> SubscribeToShardOutput:
raise NotImplementedError
+ @handler("TagResource")
+ def tag_resource(
+ self, context: RequestContext, tags: TagMap, resource_arn: ResourceARN, **kwargs
+ ) -> None:
+ raise NotImplementedError
+
+ @handler("UntagResource")
+ def untag_resource(
+ self, context: RequestContext, tag_keys: TagKeyList, resource_arn: ResourceARN, **kwargs
+ ) -> None:
+ raise NotImplementedError
+
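
Kinesis gains ARN-based tagging (`TagResource`, `UntagResource`, `ListTagsForResource`) alongside the older stream-name operations, and `RegisterStreamConsumer` can now attach tags at creation. A quick boto3 sketch, assuming a botocore that already exposes the new operations:

```python
import boto3

kinesis = boto3.client("kinesis", endpoint_url="http://localhost:4566")

kinesis.create_stream(StreamName="orders", ShardCount=1)
arn = kinesis.describe_stream_summary(StreamName="orders")[
    "StreamDescriptionSummary"
]["StreamARN"]

# The new operations address the resource by ARN instead of stream name.
kinesis.tag_resource(ResourceARN=arn, Tags={"team": "platform"})
print(kinesis.list_tags_for_resource(ResourceARN=arn)["Tags"])
kinesis.untag_resource(ResourceARN=arn, TagKeys=["team"])
```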
@handler("UpdateShardCount")
def update_shard_count(
self,
diff --git a/localstack-core/localstack/aws/api/logs/__init__.py b/localstack-core/localstack/aws/api/logs/__init__.py
index 3a22676f4bbeb..2d39131abbb85 100644
--- a/localstack-core/localstack/aws/api/logs/__init__.py
+++ b/localstack-core/localstack/aws/api/logs/__init__.py
@@ -231,6 +231,7 @@ class IntegrationType(StrEnum):
class LogGroupClass(StrEnum):
STANDARD = "STANDARD"
INFREQUENT_ACCESS = "INFREQUENT_ACCESS"
+ DELIVERY = "DELIVERY"
class OpenSearchResourceStatusType(StrEnum):
diff --git a/localstack-core/localstack/aws/api/resource_groups/__init__.py b/localstack-core/localstack/aws/api/resource_groups/__init__.py
index 4e9f669dcefff..42e0f7f5a3eb1 100644
--- a/localstack-core/localstack/aws/api/resource_groups/__init__.py
+++ b/localstack-core/localstack/aws/api/resource_groups/__init__.py
@@ -287,6 +287,7 @@ class GetTagSyncTaskOutput(TypedDict, total=False):
TaskArn: Optional[TagSyncTaskArn]
TagKey: Optional[TagKey]
TagValue: Optional[TagValue]
+ ResourceQuery: Optional[ResourceQuery]
RoleArn: Optional[RoleArn]
Status: Optional[TagSyncTaskStatus]
ErrorMessage: Optional[ErrorMessage]
@@ -463,6 +464,7 @@ class TagSyncTaskItem(TypedDict, total=False):
TaskArn: Optional[TagSyncTaskArn]
TagKey: Optional[TagKey]
TagValue: Optional[TagValue]
+ ResourceQuery: Optional[ResourceQuery]
RoleArn: Optional[RoleArn]
Status: Optional[TagSyncTaskStatus]
ErrorMessage: Optional[ErrorMessage]
@@ -500,8 +502,9 @@ class SearchResourcesOutput(TypedDict, total=False):
class StartTagSyncTaskInput(ServiceRequest):
Group: GroupStringV2
- TagKey: TagKey
- TagValue: TagValue
+ TagKey: Optional[TagKey]
+ TagValue: Optional[TagValue]
+ ResourceQuery: Optional[ResourceQuery]
RoleArn: RoleArn
@@ -511,6 +514,7 @@ class StartTagSyncTaskOutput(TypedDict, total=False):
TaskArn: Optional[TagSyncTaskArn]
TagKey: Optional[TagKey]
TagValue: Optional[TagValue]
+ ResourceQuery: Optional[ResourceQuery]
RoleArn: Optional[RoleArn]
@@ -738,9 +742,10 @@ def start_tag_sync_task(
self,
context: RequestContext,
group: GroupStringV2,
- tag_key: TagKey,
- tag_value: TagValue,
role_arn: RoleArn,
+ tag_key: TagKey = None,
+ tag_value: TagValue = None,
+ resource_query: ResourceQuery = None,
**kwargs,
) -> StartTagSyncTaskOutput:
raise NotImplementedError
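
`StartTagSyncTask` previously required a tag key/value pair; both are now optional because a task can instead be driven by a `ResourceQuery`. A hedged sketch of the query-based form (group, role, and query contents are placeholders):

```python
import json

import boto3

rg = boto3.client("resource-groups", endpoint_url="http://localhost:4566")

query = json.dumps({
    "ResourceTypeFilters": ["AWS::AllSupported"],
    "TagFilters": [{"Key": "env", "Values": ["prod"]}],
})
rg.start_tag_sync_task(
    Group="my-group",
    RoleArn="arn:aws:iam::000000000000:role/tag-sync",  # placeholder role
    ResourceQuery={"Type": "TAG_FILTERS_1_0", "Query": query},
)
```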
diff --git a/localstack-core/localstack/aws/api/route53/__init__.py b/localstack-core/localstack/aws/api/route53/__init__.py
index 820b700b5ef45..74a5da6e5a1ef 100644
--- a/localstack-core/localstack/aws/api/route53/__init__.py
+++ b/localstack-core/localstack/aws/api/route53/__init__.py
@@ -277,6 +277,8 @@ class ResourceRecordSetRegion(StrEnum):
ap_southeast_5 = "ap-southeast-5"
mx_central_1 = "mx-central-1"
ap_southeast_7 = "ap-southeast-7"
+ us_gov_east_1 = "us-gov-east-1"
+ us_gov_west_1 = "us-gov-west-1"
class ReusableDelegationSetLimitType(StrEnum):
diff --git a/localstack-core/localstack/aws/api/s3control/__init__.py b/localstack-core/localstack/aws/api/s3control/__init__.py
index a8a5963c4cbfd..ff20040184f01 100644
--- a/localstack-core/localstack/aws/api/s3control/__init__.py
+++ b/localstack-core/localstack/aws/api/s3control/__init__.py
@@ -405,6 +405,17 @@ class S3StorageClass(StrEnum):
GLACIER_IR = "GLACIER_IR"
+class ScopePermission(StrEnum):
+ GetObject = "GetObject"
+ GetObjectAttributes = "GetObjectAttributes"
+ ListMultipartUploadParts = "ListMultipartUploadParts"
+ ListBucket = "ListBucket"
+ ListBucketMultipartUploads = "ListBucketMultipartUploads"
+ PutObject = "PutObject"
+ DeleteObject = "DeleteObject"
+ AbortMultipartUpload = "AbortMultipartUpload"
+
+
class SseKmsEncryptedObjectsStatus(StrEnum):
Enabled = "Enabled"
Disabled = "Disabled"
@@ -824,6 +835,15 @@ class CreateAccessPointForObjectLambdaResult(TypedDict, total=False):
Alias: Optional[ObjectLambdaAccessPointAlias]
+ScopePermissionList = List[ScopePermission]
+PrefixesList = List[Prefix]
+
+
+class Scope(TypedDict, total=False):
+ Prefixes: Optional[PrefixesList]
+ Permissions: Optional[ScopePermissionList]
+
+
class CreateAccessPointRequest(ServiceRequest):
AccountId: AccountId
Name: AccessPointName
@@ -831,6 +851,7 @@ class CreateAccessPointRequest(ServiceRequest):
VpcConfiguration: Optional[VpcConfiguration]
PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration]
BucketAccountId: Optional[AccountId]
+ Scope: Optional[Scope]
class CreateAccessPointResult(TypedDict, total=False):
@@ -1222,6 +1243,11 @@ class DeleteAccessPointRequest(ServiceRequest):
Name: AccessPointName
+class DeleteAccessPointScopeRequest(ServiceRequest):
+ AccountId: AccountId
+ Name: AccessPointName
+
+
class DeleteBucketLifecycleConfigurationRequest(ServiceRequest):
AccountId: AccountId
Bucket: BucketName
@@ -1561,6 +1587,15 @@ class GetAccessPointResult(TypedDict, total=False):
BucketAccountId: Optional[AccountId]
+class GetAccessPointScopeRequest(ServiceRequest):
+ AccountId: AccountId
+ Name: AccessPointName
+
+
+class GetAccessPointScopeResult(TypedDict, total=False):
+ Scope: Optional[Scope]
+
+
class GetBucketLifecycleConfigurationRequest(ServiceRequest):
AccountId: AccountId
Bucket: BucketName
@@ -1965,6 +2000,18 @@ class ListAccessGrantsResult(TypedDict, total=False):
AccessGrantsList: Optional[AccessGrantsList]
+class ListAccessPointsForDirectoryBucketsRequest(ServiceRequest):
+ AccountId: AccountId
+ DirectoryBucket: Optional[BucketName]
+ NextToken: Optional[NonEmptyMaxLength1024String]
+ MaxResults: Optional[MaxResults]
+
+
+class ListAccessPointsForDirectoryBucketsResult(TypedDict, total=False):
+ AccessPointList: Optional[AccessPointList]
+ NextToken: Optional[NonEmptyMaxLength1024String]
+
+
class ListAccessPointsForObjectLambdaRequest(ServiceRequest):
AccountId: AccountId
NextToken: Optional[NonEmptyMaxLength1024String]
@@ -2137,6 +2184,12 @@ class PutAccessPointPolicyRequest(ServiceRequest):
Policy: Policy
+class PutAccessPointScopeRequest(ServiceRequest):
+ AccountId: AccountId
+ Name: AccessPointName
+ Scope: Scope
+
+
class PutBucketLifecycleConfigurationRequest(ServiceRequest):
AccountId: AccountId
Bucket: BucketName
@@ -2360,6 +2413,7 @@ def create_access_point(
vpc_configuration: VpcConfiguration = None,
public_access_block_configuration: PublicAccessBlockConfiguration = None,
bucket_account_id: AccountId = None,
+ scope: Scope = None,
**kwargs,
) -> CreateAccessPointResult:
raise NotImplementedError
@@ -2498,6 +2552,12 @@ def delete_access_point_policy_for_object_lambda(
) -> None:
raise NotImplementedError
+ @handler("DeleteAccessPointScope")
+ def delete_access_point_scope(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs
+ ) -> None:
+ raise NotImplementedError
+
@handler("DeleteBucket")
def delete_bucket(
self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs
@@ -2687,6 +2747,12 @@ def get_access_point_policy_status_for_object_lambda(
) -> GetAccessPointPolicyStatusForObjectLambdaResult:
raise NotImplementedError
+ @handler("GetAccessPointScope")
+ def get_access_point_scope(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs
+ ) -> GetAccessPointScopeResult:
+ raise NotImplementedError
+
@handler("GetBucket")
def get_bucket(
self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs
@@ -2858,6 +2924,18 @@ def list_access_points(
) -> ListAccessPointsResult:
raise NotImplementedError
+ @handler("ListAccessPointsForDirectoryBuckets")
+ def list_access_points_for_directory_buckets(
+ self,
+ context: RequestContext,
+ account_id: AccountId,
+ directory_bucket: BucketName = None,
+ next_token: NonEmptyMaxLength1024String = None,
+ max_results: MaxResults = None,
+ **kwargs,
+ ) -> ListAccessPointsForDirectoryBucketsResult:
+ raise NotImplementedError
+
@handler("ListAccessPointsForObjectLambda")
def list_access_points_for_object_lambda(
self,
@@ -2987,6 +3065,17 @@ def put_access_point_policy_for_object_lambda(
) -> None:
raise NotImplementedError
+ @handler("PutAccessPointScope")
+ def put_access_point_scope(
+ self,
+ context: RequestContext,
+ account_id: AccountId,
+ name: AccessPointName,
+ scope: Scope,
+ **kwargs,
+ ) -> None:
+ raise NotImplementedError
+
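
The S3 Control additions revolve around access point scopes: a `Scope` narrows an access point to given prefixes and a subset of the `ScopePermission` actions, with get/put/delete operations plus a directory-bucket listing. A boto3 sketch, assuming the access point `my-ap` already exists under LocalStack's default account:

```python
import boto3

s3control = boto3.client("s3control", endpoint_url="http://localhost:4566")
account_id = "000000000000"  # LocalStack's default account id

scope = {"Prefixes": ["reports/"], "Permissions": ["GetObject", "ListBucket"]}
s3control.put_access_point_scope(AccountId=account_id, Name="my-ap", Scope=scope)
print(s3control.get_access_point_scope(AccountId=account_id, Name="my-ap")["Scope"])
s3control.delete_access_point_scope(AccountId=account_id, Name="my-ap")
```

The same `Scope` shape can also be passed at creation time through the new optional `Scope` member of `create_access_point`.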
@handler("PutBucketLifecycleConfiguration")
def put_bucket_lifecycle_configuration(
self,
diff --git a/localstack-core/localstack/aws/api/ssm/__init__.py b/localstack-core/localstack/aws/api/ssm/__init__.py
index a906bb4247944..a2e95b19d9538 100644
--- a/localstack-core/localstack/aws/api/ssm/__init__.py
+++ b/localstack-core/localstack/aws/api/ssm/__init__.py
@@ -4,6 +4,9 @@
from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+AccessKeyIdType = str
+AccessKeySecretType = str
+AccessRequestId = str
Account = str
AccountId = str
ActivationCode = str
@@ -252,6 +255,7 @@
ParametersFilterValue = str
PatchAdvisoryId = str
PatchArch = str
+PatchAvailableSecurityUpdateCount = int
PatchBaselineMaxResults = int
PatchBugzillaId = str
PatchCVEId = str
@@ -348,6 +352,7 @@
SessionOwner = str
SessionReason = str
SessionTarget = str
+SessionTokenType = str
SharedDocumentVersion = str
SnapshotDownloadUrl = str
SnapshotId = str
@@ -361,6 +366,7 @@
StepExecutionFilterValue = str
StreamUrl = str
String = str
+String1to256 = str
StringDateTime = str
TagKey = str
TagValue = str
@@ -380,6 +386,14 @@
Version = str
+class AccessRequestStatus(StrEnum):
+ Approved = "Approved"
+ Rejected = "Rejected"
+ Revoked = "Revoked"
+ Expired = "Expired"
+ Pending = "Pending"
+
+
class AssociationComplianceSeverity(StrEnum):
CRITICAL = "CRITICAL"
HIGH = "HIGH"
@@ -477,6 +491,7 @@ class AutomationExecutionStatus(StrEnum):
class AutomationSubtype(StrEnum):
ChangeRequest = "ChangeRequest"
+ AccessRequest = "AccessRequest"
class AutomationType(StrEnum):
@@ -631,6 +646,8 @@ class DocumentType(StrEnum):
CloudFormation = "CloudFormation"
ConformancePackTemplate = "ConformancePackTemplate"
QuickSetup = "QuickSetup"
+ ManualApprovalPolicy = "ManualApprovalPolicy"
+ AutoApprovalPolicy = "AutoApprovalPolicy"
class ExecutionMode(StrEnum):
@@ -880,6 +897,15 @@ class OpsItemFilterKey(StrEnum):
Category = "Category"
Severity = "Severity"
OpsItemType = "OpsItemType"
+ AccessRequestByRequesterArn = "AccessRequestByRequesterArn"
+ AccessRequestByRequesterId = "AccessRequestByRequesterId"
+ AccessRequestByApproverArn = "AccessRequestByApproverArn"
+ AccessRequestByApproverId = "AccessRequestByApproverId"
+ AccessRequestBySourceAccountId = "AccessRequestBySourceAccountId"
+ AccessRequestBySourceOpsItemId = "AccessRequestBySourceOpsItemId"
+ AccessRequestBySourceRegion = "AccessRequestBySourceRegion"
+ AccessRequestByIsReplica = "AccessRequestByIsReplica"
+ AccessRequestByTargetResourceId = "AccessRequestByTargetResourceId"
ChangeRequestByRequesterArn = "ChangeRequestByRequesterArn"
ChangeRequestByRequesterName = "ChangeRequestByRequesterName"
ChangeRequestByApproverArn = "ChangeRequestByApproverArn"
@@ -925,6 +951,7 @@ class OpsItemStatus(StrEnum):
ChangeCalendarOverrideRejected = "ChangeCalendarOverrideRejected"
PendingApproval = "PendingApproval"
Approved = "Approved"
+ Revoked = "Revoked"
Rejected = "Rejected"
Closed = "Closed"
@@ -960,6 +987,7 @@ class PatchComplianceDataState(StrEnum):
MISSING = "MISSING"
NOT_APPLICABLE = "NOT_APPLICABLE"
FAILED = "FAILED"
+ AVAILABLE_SECURITY_UPDATE = "AVAILABLE_SECURITY_UPDATE"
class PatchComplianceLevel(StrEnum):
@@ -971,6 +999,11 @@ class PatchComplianceLevel(StrEnum):
UNSPECIFIED = "UNSPECIFIED"
+class PatchComplianceStatus(StrEnum):
+ COMPLIANT = "COMPLIANT"
+ NON_COMPLIANT = "NON_COMPLIANT"
+
+
class PatchDeploymentStatus(StrEnum):
APPROVED = "APPROVED"
PENDING_APPROVAL = "PENDING_APPROVAL"
@@ -1093,6 +1126,7 @@ class SignalType(StrEnum):
StartStep = "StartStep"
StopStep = "StopStep"
Resume = "Resume"
+ Revoke = "Revoke"
class SourceType(StrEnum):
@@ -1118,6 +1152,12 @@ class StopType(StrEnum):
Cancel = "Cancel"
+class AccessDeniedException(ServiceException):
+ code: str = "AccessDeniedException"
+ sender_fault: bool = False
+ status_code: int = 400
+
+
class AlreadyExistsException(ServiceException):
code: str = "AlreadyExistsException"
sender_fault: bool = False
@@ -1848,6 +1888,16 @@ class ResourcePolicyNotFoundException(ServiceException):
status_code: int = 400
+class ServiceQuotaExceededException(ServiceException):
+ code: str = "ServiceQuotaExceededException"
+ sender_fault: bool = False
+ status_code: int = 400
+ ResourceId: Optional[String]
+ ResourceType: Optional[String]
+ QuotaCode: String
+ ServiceCode: String
+
+
class ServiceSettingNotFound(ServiceException):
code: str = "ServiceSettingNotFound"
sender_fault: bool = False
@@ -1878,6 +1928,14 @@ class TargetNotConnected(ServiceException):
status_code: int = 400
+class ThrottlingException(ServiceException):
+ code: str = "ThrottlingException"
+ sender_fault: bool = False
+ status_code: int = 400
+ QuotaCode: Optional[String]
+ ServiceCode: Optional[String]
+
+
class TooManyTagsError(ServiceException):
code: str = "TooManyTagsError"
sender_fault: bool = False
@@ -2510,6 +2568,7 @@ class BaselineOverride(TypedDict, total=False):
RejectedPatchesAction: Optional[PatchAction]
ApprovedPatchesEnableNonSecurity: Optional[Boolean]
Sources: Optional[PatchSourceList]
+ AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
InstanceIdList = List[InstanceId]
@@ -2970,6 +3029,7 @@ class CreatePatchBaselineRequest(ServiceRequest):
RejectedPatchesAction: Optional[PatchAction]
Description: Optional[BaselineDescription]
Sources: Optional[PatchSourceList]
+ AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
ClientToken: Optional[ClientToken]
Tags: Optional[TagList]
@@ -3025,6 +3085,13 @@ class CreateResourceDataSyncResult(TypedDict, total=False):
pass
+class Credentials(TypedDict, total=False):
+ AccessKeyId: AccessKeyIdType
+ SecretAccessKey: AccessKeySecretType
+ SessionToken: SessionTokenType
+ ExpirationTime: DateTime
+
+
class DeleteActivationRequest(ServiceRequest):
ActivationId: ActivationId
@@ -3547,6 +3614,7 @@ class InstancePatchState(TypedDict, total=False):
FailedCount: Optional[PatchFailedCount]
UnreportedNotApplicableCount: Optional[PatchUnreportedNotApplicableCount]
NotApplicableCount: Optional[PatchNotApplicableCount]
+ AvailableSecurityUpdateCount: Optional[PatchAvailableSecurityUpdateCount]
OperationStartTime: DateTime
OperationEndTime: DateTime
Operation: PatchOperationType
@@ -4089,6 +4157,7 @@ class DescribePatchGroupStateResult(TypedDict, total=False):
InstancesWithCriticalNonCompliantPatches: Optional[InstancesCount]
InstancesWithSecurityNonCompliantPatches: Optional[InstancesCount]
InstancesWithOtherNonCompliantPatches: Optional[InstancesCount]
+ InstancesWithAvailableSecurityUpdates: Optional[Integer]
class DescribePatchGroupsRequest(ServiceRequest):
@@ -4274,6 +4343,15 @@ class ExecutionPreview(TypedDict, total=False):
Automation: Optional[AutomationExecutionPreview]
+class GetAccessTokenRequest(ServiceRequest):
+ AccessRequestId: AccessRequestId
+
+
+class GetAccessTokenResponse(TypedDict, total=False):
+ Credentials: Optional[Credentials]
+ AccessRequestStatus: Optional[AccessRequestStatus]
+
+
class GetAutomationExecutionRequest(ServiceRequest):
AutomationExecutionId: AutomationExecutionId
@@ -4857,6 +4935,7 @@ class GetPatchBaselineResult(TypedDict, total=False):
ModifiedDate: Optional[DateTime]
Description: Optional[BaselineDescription]
Sources: Optional[PatchSourceList]
+ AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
class GetResourcePoliciesRequest(ServiceRequest):
@@ -5524,6 +5603,16 @@ class SendCommandResult(TypedDict, total=False):
SessionManagerParameters = Dict[SessionManagerParameterName, SessionManagerParameterValueList]
+class StartAccessRequestRequest(ServiceRequest):
+ Reason: String1to256
+ Targets: Targets
+ Tags: Optional[TagList]
+
+
+class StartAccessRequestResponse(TypedDict, total=False):
+ AccessRequestId: Optional[AccessRequestId]
+
+
class StartAssociationsOnceRequest(ServiceRequest):
AssociationIds: AssociationIdList
@@ -5835,6 +5924,7 @@ class UpdatePatchBaselineRequest(ServiceRequest):
RejectedPatchesAction: Optional[PatchAction]
Description: Optional[BaselineDescription]
Sources: Optional[PatchSourceList]
+ AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
Replace: Optional[Boolean]
@@ -5853,6 +5943,7 @@ class UpdatePatchBaselineResult(TypedDict, total=False):
ModifiedDate: Optional[DateTime]
Description: Optional[BaselineDescription]
Sources: Optional[PatchSourceList]
+ AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
class UpdateResourceDataSyncRequest(ServiceRequest):
@@ -6055,6 +6146,7 @@ def create_patch_baseline(
rejected_patches_action: PatchAction = None,
description: BaselineDescription = None,
sources: PatchSourceList = None,
+ available_security_updates_compliance_status: PatchComplianceStatus = None,
client_token: ClientToken = None,
tags: TagList = None,
**kwargs,
@@ -6598,6 +6690,12 @@ def disassociate_ops_item_related_item(
) -> DisassociateOpsItemRelatedItemResponse:
raise NotImplementedError
+ @handler("GetAccessToken")
+ def get_access_token(
+ self, context: RequestContext, access_request_id: AccessRequestId, **kwargs
+ ) -> GetAccessTokenResponse:
+ raise NotImplementedError
+
@handler("GetAutomationExecution")
def get_automation_execution(
self, context: RequestContext, automation_execution_id: AutomationExecutionId, **kwargs
@@ -7236,6 +7334,17 @@ def send_command(
) -> SendCommandResult:
raise NotImplementedError
+ @handler("StartAccessRequest")
+ def start_access_request(
+ self,
+ context: RequestContext,
+ reason: String1to256,
+ targets: Targets,
+ tags: TagList = None,
+ **kwargs,
+ ) -> StartAccessRequestResponse:
+ raise NotImplementedError
+
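
The SSM additions implement just-in-time access requests: `StartAccessRequest` opens a request (tracked through the new `AccessRequestStatus` values), and once approved, `GetAccessToken` exchanges the request id for temporary `Credentials`. A hedged boto3 sketch; the target key and instance id are placeholders:

```python
import boto3

ssm = boto3.client("ssm", endpoint_url="http://localhost:4566")

req = ssm.start_access_request(
    Reason="debug production incident",
    Targets=[{"Key": "InstanceIds", "Values": ["i-0123456789abcdef0"]}],
)
token = ssm.get_access_token(AccessRequestId=req["AccessRequestId"])
print(token.get("AccessRequestStatus"), token.get("Credentials"))
```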
@handler("StartAssociationsOnce")
def start_associations_once(
self, context: RequestContext, association_ids: AssociationIdList, **kwargs
@@ -7522,6 +7631,7 @@ def update_patch_baseline(
rejected_patches_action: PatchAction = None,
description: BaselineDescription = None,
sources: PatchSourceList = None,
+ available_security_updates_compliance_status: PatchComplianceStatus = None,
replace: Boolean = None,
**kwargs,
) -> UpdatePatchBaselineResult:
diff --git a/localstack-core/localstack/aws/api/transcribe/__init__.py b/localstack-core/localstack/aws/api/transcribe/__init__.py
index 2ab6b49a74b37..440363a46dd95 100644
--- a/localstack-core/localstack/aws/api/transcribe/__init__.py
+++ b/localstack-core/localstack/aws/api/transcribe/__init__.py
@@ -177,6 +177,7 @@ class LanguageCode(StrEnum):
uk_UA = "uk-UA"
uz_UZ = "uz-UZ"
wo_SN = "wo-SN"
+ zh_HK = "zh-HK"
zu_ZA = "zu-ZA"
diff --git a/localstack-core/localstack/cli/main.py b/localstack-core/localstack/cli/main.py
index d9162bb098a4d..de1f04e38cac5 100644
--- a/localstack-core/localstack/cli/main.py
+++ b/localstack-core/localstack/cli/main.py
@@ -6,9 +6,10 @@ def main():
os.environ["LOCALSTACK_CLI"] = "1"
# config profiles are the first thing that need to be loaded (especially before localstack.config!)
- from .profiles import set_profile_from_sys_argv
+ from .profiles import set_and_remove_profile_from_sys_argv
- set_profile_from_sys_argv()
+ # WARNING: This function modifies sys.argv to remove the profile argument.
+ set_and_remove_profile_from_sys_argv()
# initialize CLI plugins
from .localstack import create_with_plugins
diff --git a/localstack-core/localstack/cli/profiles.py b/localstack-core/localstack/cli/profiles.py
index 1625b802f73a4..585757496e08c 100644
--- a/localstack-core/localstack/cli/profiles.py
+++ b/localstack-core/localstack/cli/profiles.py
@@ -1,3 +1,4 @@
+import argparse
import os
import sys
from typing import Optional
@@ -5,36 +6,61 @@
# important: this needs to be free of localstack imports
-def set_profile_from_sys_argv():
+def set_and_remove_profile_from_sys_argv():
"""
- Reads the --profile flag from sys.argv and then sets the 'CONFIG_PROFILE' os variable accordingly. This is later
- picked up by ``localstack.config``.
+ Performs the following steps:
+
+ 1. Use argparse to parse the command line arguments for the --profile flag.
+ All occurrences are removed from the sys.argv list, and the value from
+ the last occurrence is used. This allows the user to specify a profile
+ at any point on the command line.
+
+ 2. If a --profile flag is not found, check for the -p flag. The first
+ occurrence of the -p flag is used and it is not removed from sys.argv.
+ The reasoning for this is that at least one of the CLI subcommands has
+ a -p flag, and we want to keep it in sys.argv for that command to
+ pick up. An existing bug means that if a -p flag is used with a
+ subcommand, it could erroneously be used as the profile value as well.
+       This behaviour is undesirable, but we must maintain backwards
+       compatibility by continuing to allow the profile to be specified using -p.
+
+ 3. If a profile is found, the 'CONFIG_PROFILE' os variable is set
+ accordingly. This is later picked up by ``localstack.config``.
+
+ WARNING: Any --profile options are REMOVED from sys.argv, so that they are
+ not passed to the localstack CLI. This allows the profile option
+ to be set at any point on the command line.
"""
- profile = parse_profile_argument(sys.argv)
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--profile")
+ namespace, sys.argv = parser.parse_known_args(sys.argv)
+ profile = namespace.profile
+
+ if not profile:
+ # if no profile is given, check for the -p argument
+ profile = parse_p_argument(sys.argv)
+
if profile:
os.environ["CONFIG_PROFILE"] = profile.strip()
-def parse_profile_argument(args) -> Optional[str]:
+def parse_p_argument(args) -> Optional[str]:
"""
-    Lightweight arg parsing to find ``--profile <profile>``, or ``--profile=<profile>`` and return the value of
+    Lightweight arg parsing to find the first occurrence of ``-p <profile>``, or ``-p=<profile>`` and return the value of
    ``<profile>`` from the given arguments.
:param args: list of CLI arguments
- :returns: the value of ``--profile``.
+ :returns: the value of ``-p``.
"""
for i, current_arg in enumerate(args):
- if current_arg.startswith("--profile="):
- # if using the "=" notation, we remove the "--profile=" prefix to get the value
- return current_arg[10:]
- elif current_arg.startswith("-p="):
+ if current_arg.startswith("-p="):
# if using the "=" notation, we remove the "-p=" prefix to get the value
return current_arg[3:]
- if current_arg in ["--profile", "-p"]:
+ if current_arg == "-p":
# otherwise use the next arg in the args list as value
try:
return args[i + 1]
- except KeyError:
+ except IndexError:
return None
return None
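
For illustration, a minimal sketch of the parse_known_args mechanics used above (argv values are hypothetical):

    import argparse

    # mirrors sys.argv, program name included; the profile flag may appear anywhere
    argv = ["localstack", "start", "--profile", "dev", "--docker"]

    parser = argparse.ArgumentParser()
    parser.add_argument("--profile")
    # parse_known_args returns (namespace, leftover); unknown arguments such as
    # "--docker" are passed through instead of raising an error
    namespace, argv = parser.parse_known_args(argv)

    print(namespace.profile)  # dev
    print(argv)               # ['localstack', 'start', '--docker']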
diff --git a/localstack-core/localstack/config.py b/localstack-core/localstack/config.py
index 9327053274a18..89583165b8787 100644
--- a/localstack-core/localstack/config.py
+++ b/localstack-core/localstack/config.py
@@ -605,7 +605,7 @@ def _get_unprivileged_port_range_start(self) -> int:
def is_unprivileged(self) -> bool:
return self.port >= self._get_unprivileged_port_range_start()
- def host_and_port(self):
+ def host_and_port(self) -> str:
formatted_host = f"[{self.host}]" if is_ipv6_address(self.host) else self.host
return f"{formatted_host}:{self.port}" if self.port is not None else formatted_host
@@ -1089,10 +1089,8 @@ def populate_edge_configuration(
os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC") or 10
)
-# Adding Stepfunctions default port
-LOCAL_PORT_STEPFUNCTIONS = int(os.environ.get("LOCAL_PORT_STEPFUNCTIONS") or 8083)
-# Stepfunctions lambda endpoint override
-STEPFUNCTIONS_LAMBDA_ENDPOINT = os.environ.get("STEPFUNCTIONS_LAMBDA_ENDPOINT", "").strip()
+# Specifies the path to the mock configuration file for Step Functions, commonly named MockConfigFile.json.
+SFN_MOCK_CONFIG = os.environ.get("SFN_MOCK_CONFIG", "").strip()
# path prefix for windows volume mounting
WINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get("WINDOWS_DOCKER_MOUNT_PREFIX", "/host_mnt")
@@ -1364,7 +1362,6 @@ def use_custom_dns():
"SQS_ENDPOINT_STRATEGY",
"SQS_DISABLE_CLOUDWATCH_METRICS",
"SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL",
- "STEPFUNCTIONS_LAMBDA_ENDPOINT",
"STRICT_SERVICE_LOADING",
"TF_COMPAT_MODE",
"USE_SSL",
diff --git a/localstack-core/localstack/constants.py b/localstack-core/localstack/constants.py
index 57406fa09e6a8..f5d43d2bab1e9 100644
--- a/localstack-core/localstack/constants.py
+++ b/localstack-core/localstack/constants.py
@@ -45,7 +45,7 @@
LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(MODULE_MAIN_PATH, ".."))
# virtualenv folder
-LOCALSTACK_VENV_FOLDER = os.environ.get("VIRTUAL_ENV")
+LOCALSTACK_VENV_FOLDER: str = os.environ.get("VIRTUAL_ENV")
if not LOCALSTACK_VENV_FOLDER:
# fallback to the previous logic
LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, ".venv")
diff --git a/localstack-core/localstack/deprecations.py b/localstack-core/localstack/deprecations.py
index 1ece1f5ccfec3..1690ca227d878 100644
--- a/localstack-core/localstack/deprecations.py
+++ b/localstack-core/localstack/deprecations.py
@@ -311,6 +311,20 @@ def is_affected(self) -> bool:
" is faster, achieves great AWS parity, and fixes compatibility issues with the StepFunctions JSONata feature."
" Please remove EVENT_RULE_ENGINE.",
),
+ EnvVarDeprecation(
+ "STEPFUNCTIONS_LAMBDA_ENDPOINT",
+ "4.0.0",
+ "This is only supported for the legacy provider. URL to use as the Lambda service endpoint in Step Functions. "
+ "By default this is the LocalStack Lambda endpoint. Use default to select the original AWS Lambda endpoint.",
+ ),
+ EnvVarDeprecation(
+ "LOCAL_PORT_STEPFUNCTIONS",
+ "4.0.0",
+ "This is only supported for the legacy provider."
+ "It defines the local port to which Step Functions traffic is redirected."
+ "By default, LocalStack routes Step Functions traffic to its internal runtime. "
+ "Use this variable only if you need to redirect traffic to a different local Step Functions runtime.",
+ ),
]
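
The deprecation entries are plain records checked against the process environment; a self-contained sketch of that pattern (field names simplified, not the module's exact API):

    import os
    from dataclasses import dataclass

    @dataclass
    class EnvVarDeprecation:
        env_var: str
        deprecation_version: str
        deprecation_message: str

        @property
        def is_affected(self) -> bool:
            # a deprecation applies as soon as the variable is set at all
            return self.env_var in os.environ

    deprecation = EnvVarDeprecation(
        "LOCAL_PORT_STEPFUNCTIONS", "4.0.0", "Only supported by the legacy provider."
    )
    os.environ["LOCAL_PORT_STEPFUNCTIONS"] = "8083"
    assert deprecation.is_affected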
diff --git a/localstack-core/localstack/dev/run/configurators.py b/localstack-core/localstack/dev/run/configurators.py
index 2c3b253965e87..4f1b9e3e29cde 100644
--- a/localstack-core/localstack/dev/run/configurators.py
+++ b/localstack-core/localstack/dev/run/configurators.py
@@ -10,9 +10,9 @@
from localstack import config, constants
from localstack.utils.bootstrap import ContainerConfigurators
from localstack.utils.container_utils.container_client import (
+ BindMount,
ContainerClient,
ContainerConfiguration,
- VolumeBind,
VolumeMappings,
)
from localstack.utils.docker_utils import DOCKER_CLIENT
@@ -107,7 +107,7 @@ def __call__(self, cfg: ContainerConfiguration):
# encoding needs to be "utf-8" since scripts could include emojis
file.write_text(self.script, newline="\n", encoding="utf-8")
file.chmod(0o777)
- cfg.volumes.add(VolumeBind(str(file), f"/tmp/{file.name}"))
+ cfg.volumes.add(BindMount(str(file), f"/tmp/{file.name}"))
cfg.entrypoint = f"/tmp/{file.name}"
@@ -137,7 +137,7 @@ def __call__(self, cfg: ContainerConfiguration):
cfg.volumes.add(
# read_only=False is a temporary workaround to make the mounting of the pro source work
# this can be reverted once we don't need the nested mounting anymore
- VolumeBind(str(source), self.container_paths.localstack_source_dir, read_only=False)
+ BindMount(str(source), self.container_paths.localstack_source_dir, read_only=False)
)
# ext source code if available
@@ -145,7 +145,7 @@ def __call__(self, cfg: ContainerConfiguration):
source = self.host_paths.aws_pro_package_dir
if source.exists():
cfg.volumes.add(
- VolumeBind(
+ BindMount(
str(source), self.container_paths.localstack_pro_source_dir, read_only=True
)
)
@@ -163,7 +163,7 @@ def __call__(self, cfg: ContainerConfiguration):
source = self.host_paths.localstack_project_dir / "bin" / "docker-entrypoint.sh"
if source.exists():
cfg.volumes.add(
- VolumeBind(str(source), self.container_paths.docker_entrypoint, read_only=True)
+ BindMount(str(source), self.container_paths.docker_entrypoint, read_only=True)
)
def try_mount_to_site_packages(self, cfg: ContainerConfiguration, sources_path: Path):
@@ -177,7 +177,7 @@ def try_mount_to_site_packages(self, cfg: ContainerConfiguration, sources_path:
"""
if sources_path.exists():
cfg.volumes.add(
- VolumeBind(
+ BindMount(
str(sources_path),
self.container_paths.dependency_source(sources_path.name),
read_only=True,
@@ -219,7 +219,7 @@ def __call__(self, cfg: ContainerConfiguration):
host_path = self.host_paths.aws_community_package_dir
if host_path.exists():
cfg.volumes.append(
- VolumeBind(
+ BindMount(
str(host_path), self.localstack_community_entry_points, read_only=True
)
)
@@ -244,7 +244,7 @@ def __call__(self, cfg: ContainerConfiguration):
)
if host_path.is_file():
cfg.volumes.add(
- VolumeBind(
+ BindMount(
str(host_path),
str(container_path),
read_only=True,
@@ -260,7 +260,7 @@ def __call__(self, cfg: ContainerConfiguration):
)
if host_path.is_file():
cfg.volumes.add(
- VolumeBind(
+ BindMount(
str(host_path),
str(container_path),
read_only=True,
@@ -270,7 +270,7 @@ def __call__(self, cfg: ContainerConfiguration):
for host_path in self.host_paths.workspace_dir.glob(
f"*/{dep}.egg-info/entry_points.txt"
):
- cfg.volumes.add(VolumeBind(str(host_path), str(container_path), read_only=True))
+ cfg.volumes.add(BindMount(str(host_path), str(container_path), read_only=True))
break
@@ -330,7 +330,7 @@ def __call__(self, cfg: ContainerConfiguration):
if self._has_mount(cfg.volumes, target_path):
continue
- cfg.volumes.append(VolumeBind(str(dep_path), target_path))
+ cfg.volumes.append(BindMount(str(dep_path), target_path))
def _can_be_source_path(self, path: Path) -> bool:
return path.is_dir() or (path.name.endswith(".py") and not path.name.startswith("__"))
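
The VolumeBind-to-BindMount rename is mechanical and the call sites keep the same shape; for example (paths are illustrative):

    from localstack.utils.container_utils.container_client import (
        BindMount,
        VolumeMappings,
    )

    volumes = VolumeMappings()
    # mount a host file into the container read-only, as the configurators above do
    volumes.add(BindMount("/tmp/docker-entrypoint.sh", "/tmp/docker-entrypoint.sh", read_only=True))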
diff --git a/localstack-core/localstack/packages/api.py b/localstack-core/localstack/packages/api.py
index b3260e9c5b83f..bcc8add9577c5 100644
--- a/localstack-core/localstack/packages/api.py
+++ b/localstack-core/localstack/packages/api.py
@@ -6,9 +6,9 @@
from enum import Enum
from inspect import getmodule
from threading import RLock
-from typing import Callable, List, Optional, Tuple
+from typing import Any, Callable, Generic, List, Optional, ParamSpec, TypeVar
-from plux import Plugin, PluginManager, PluginSpec
+from plux import Plugin, PluginManager, PluginSpec # type: ignore
from localstack import config
@@ -24,7 +24,7 @@ class PackageException(Exception):
class NoSuchVersionException(PackageException):
"""Exception indicating that a requested installer version is not available / supported."""
- def __init__(self, package: str = None, version: str = None):
+ def __init__(self, package: str | None = None, version: str | None = None):
message = "Unable to find requested version"
if package and version:
message += f"Unable to find requested version '{version}' for package '{package}'"
@@ -123,6 +123,7 @@ def get_installed_dir(self) -> str | None:
directory = self._get_install_dir(target)
if directory and os.path.exists(self._get_install_marker_path(directory)):
return directory
+ return None
def _get_install_dir(self, target: InstallTarget) -> str:
"""
@@ -181,7 +182,12 @@ def _post_process(self, target: InstallTarget) -> None:
pass
-class Package(abc.ABC):
+# With Python 3.13 we should be able to set PackageInstaller as the default
+# https://typing.python.org/en/latest/spec/generics.html#type-parameter-defaults
+T = TypeVar("T", bound=PackageInstaller)
+
+
+class Package(abc.ABC, Generic[T]):
"""
A Package defines a specific kind of software, mostly used as backends or supporting system for service
implementations.
@@ -214,7 +220,7 @@ def install(self, version: str | None = None, target: Optional[InstallTarget] =
self.get_installer(version).install(target)
@functools.lru_cache()
- def get_installer(self, version: str | None = None) -> PackageInstaller:
+ def get_installer(self, version: str | None = None) -> T:
"""
Returns the installer instance for a specific version of the package.
@@ -237,7 +243,7 @@ def get_versions(self) -> List[str]:
"""
raise NotImplementedError()
- def _get_installer(self, version: str) -> PackageInstaller:
+ def _get_installer(self, version: str) -> T:
"""
Internal lookup function which needs to be implemented by specific packages.
It creates PackageInstaller instances for the specific version.
@@ -247,7 +253,7 @@ def _get_installer(self, version: str) -> PackageInstaller:
"""
raise NotImplementedError()
- def __str__(self):
+ def __str__(self) -> str:
return self.name
@@ -298,7 +304,7 @@ def _get_install_marker_path(self, install_dir: str) -> str:
PLUGIN_NAMESPACE = "localstack.packages"
-class PackagesPlugin(Plugin):
+class PackagesPlugin(Plugin): # type: ignore[misc]
"""
Plugin implementation for Package plugins.
A package plugin exposes a specific package instance.
@@ -311,8 +317,8 @@ def __init__(
self,
name: str,
scope: str,
- get_package: Callable[[], Package | List[Package]],
- should_load: Callable[[], bool] = None,
+ get_package: Callable[[], Package[PackageInstaller] | List[Package[PackageInstaller]]],
+ should_load: Callable[[], bool] | None = None,
) -> None:
super().__init__()
self.name = name
@@ -325,11 +331,11 @@ def should_load(self) -> bool:
return self._should_load()
return True
- def get_package(self) -> Package:
+ def get_package(self) -> Package[PackageInstaller]:
"""
:return: returns the package instance of this package plugin
"""
- return self._get_package()
+ return self._get_package() # type: ignore[return-value]
class NoSuchPackageException(PackageException):
@@ -338,20 +344,20 @@ class NoSuchPackageException(PackageException):
pass
-class PackagesPluginManager(PluginManager[PackagesPlugin]):
+class PackagesPluginManager(PluginManager[PackagesPlugin]): # type: ignore[misc]
"""PluginManager which simplifies the loading / access of PackagesPlugins and their exposed package instances."""
- def __init__(self):
+ def __init__(self) -> None:
super().__init__(PLUGIN_NAMESPACE)
- def get_all_packages(self) -> List[Tuple[str, str, Package]]:
+ def get_all_packages(self) -> list[tuple[str, str, Package[PackageInstaller]]]:
return sorted(
[(plugin.name, plugin.scope, plugin.get_package()) for plugin in self.load_all()]
)
def get_packages(
- self, package_names: List[str], version: Optional[str] = None
- ) -> List[Package]:
+ self, package_names: list[str], version: Optional[str] = None
+ ) -> list[Package[PackageInstaller]]:
# Plugin names are unique, but there could be multiple packages with the same name in different scopes
plugin_specs_per_name = defaultdict(list)
        # Plugin names have the format "<package_name>/<scope>", build a dict of specs per package name for the lookup
@@ -359,7 +365,7 @@ def get_packages(
(package_name, _, _) = plugin_spec.name.rpartition("/")
plugin_specs_per_name[package_name].append(plugin_spec)
- package_instances: List[Package] = []
+ package_instances: list[Package[PackageInstaller]] = []
for package_name in package_names:
plugin_specs = plugin_specs_per_name.get(package_name)
if not plugin_specs:
@@ -377,9 +383,15 @@ def get_packages(
return package_instances
+P = ParamSpec("P")
+T2 = TypeVar("T2")
+
+
def package(
- name: str = None, scope: str = "community", should_load: Optional[Callable[[], bool]] = None
-):
+ name: str | None = None,
+ scope: str = "community",
+ should_load: Optional[Callable[[], bool]] = None,
+) -> Callable[[Callable[[], Package[Any] | list[Package[Any]]]], PluginSpec]:
"""
Decorator for marking methods that create Package instances as a PackagePlugin.
Methods marked with this decorator are discoverable as a PluginSpec within the namespace "localstack.packages",
@@ -387,8 +399,8 @@ def package(
service name.
"""
- def wrapper(fn):
- _name = name or getmodule(fn).__name__.split(".")[-2]
+ def wrapper(fn: Callable[[], Package[Any] | list[Package[Any]]]) -> PluginSpec:
+ _name = name or getmodule(fn).__name__.split(".")[-2] # type: ignore[union-attr]
@functools.wraps(fn)
def factory() -> PackagesPlugin:
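
With the new type parameter, a package declares its concrete installer type and get_installer() is typed accordingly; a minimal sketch following the pattern of the built-in packages (the Dummy* names are hypothetical):

    from typing import List

    from localstack.packages.api import InstallTarget, Package, PackageInstaller

    class DummyInstaller(PackageInstaller):
        # minimal installer: the marker path denotes a completed installation
        def _get_install_marker_path(self, install_dir: str) -> str:
            return install_dir

        def _install(self, target: InstallTarget) -> None:
            pass

    class DummyPackage(Package["DummyInstaller"]):
        def __init__(self) -> None:
            super().__init__("Dummy", "1.0.0")

        def get_versions(self) -> List[str]:
            return ["1.0.0"]

        def _get_installer(self, version: str) -> "DummyInstaller":
            # get_installer() now returns DummyInstaller instead of the base type
            return DummyInstaller("dummy", version)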
diff --git a/localstack-core/localstack/packages/core.py b/localstack-core/localstack/packages/core.py
index ae04a4b70f171..5b8996deaa844 100644
--- a/localstack-core/localstack/packages/core.py
+++ b/localstack-core/localstack/packages/core.py
@@ -4,7 +4,7 @@
from abc import ABC
from functools import lru_cache
from sys import version_info
-from typing import Optional, Tuple
+from typing import Any, Optional, Tuple
import requests
@@ -39,6 +39,7 @@ def get_executable_path(self) -> str | None:
install_dir = self.get_installed_dir()
if install_dir:
return self._get_install_marker_path(install_dir)
+ return None
class DownloadInstaller(ExecutableInstaller):
@@ -104,6 +105,7 @@ def get_executable_path(self) -> str | None:
if install_dir:
install_dir = install_dir[: -len(subdir)]
return self._get_install_marker_path(install_dir)
+ return None
def _install(self, target: InstallTarget) -> None:
target_directory = self._get_install_dir(target)
@@ -133,7 +135,7 @@ def _install(self, target: InstallTarget) -> None:
class PermissionDownloadInstaller(DownloadInstaller, ABC):
def _install(self, target: InstallTarget) -> None:
super()._install(target)
- chmod_r(self.get_executable_path(), 0o777)
+ chmod_r(self.get_executable_path(), 0o777) # type: ignore[arg-type]
class GitHubReleaseInstaller(PermissionDownloadInstaller):
@@ -249,11 +251,11 @@ class PythonPackageInstaller(PackageInstaller):
normalized_name: str
"""Normalized package name according to PEP440."""
- def __init__(self, name: str, version: str, *args, **kwargs):
+ def __init__(self, name: str, version: str, *args: Any, **kwargs: Any):
super().__init__(name, version, *args, **kwargs)
self.normalized_name = self._normalize_package_name(name)
- def _normalize_package_name(self, name: str):
+ def _normalize_package_name(self, name: str) -> str:
"""
Normalized the Python package name according to PEP440.
https://packaging.python.org/en/latest/specifications/name-normalization/#name-normalization
diff --git a/localstack-core/localstack/packages/debugpy.py b/localstack-core/localstack/packages/debugpy.py
index bd2a768b08cd7..2731236f747a1 100644
--- a/localstack-core/localstack/packages/debugpy.py
+++ b/localstack-core/localstack/packages/debugpy.py
@@ -4,14 +4,14 @@
from localstack.utils.run import run
-class DebugPyPackage(Package):
- def __init__(self):
+class DebugPyPackage(Package["DebugPyPackageInstaller"]):
+ def __init__(self) -> None:
super().__init__("DebugPy", "latest")
def get_versions(self) -> List[str]:
return ["latest"]
- def _get_installer(self, version: str) -> PackageInstaller:
+ def _get_installer(self, version: str) -> "DebugPyPackageInstaller":
return DebugPyPackageInstaller("debugpy", version)
@@ -20,7 +20,7 @@ class DebugPyPackageInstaller(PackageInstaller):
def is_installed(self) -> bool:
try:
- import debugpy # noqa: T100
+ import debugpy # type: ignore[import-not-found] # noqa: T100
assert debugpy
return True
diff --git a/localstack-core/localstack/packages/ffmpeg.py b/localstack-core/localstack/packages/ffmpeg.py
index 096c4fae34a79..59279701ec81d 100644
--- a/localstack-core/localstack/packages/ffmpeg.py
+++ b/localstack-core/localstack/packages/ffmpeg.py
@@ -1,7 +1,7 @@
import os
from typing import List
-from localstack.packages import Package, PackageInstaller
+from localstack.packages import Package
from localstack.packages.core import ArchiveDownloadAndExtractInstaller
from localstack.utils.platform import get_arch
@@ -10,11 +10,11 @@
)
-class FfmpegPackage(Package):
- def __init__(self):
+class FfmpegPackage(Package["FfmpegPackageInstaller"]):
+ def __init__(self) -> None:
super().__init__(name="ffmpeg", default_version="7.0.1")
- def _get_installer(self, version: str) -> PackageInstaller:
+ def _get_installer(self, version: str) -> "FfmpegPackageInstaller":
return FfmpegPackageInstaller(version)
def get_versions(self) -> List[str]:
@@ -35,10 +35,10 @@ def _get_archive_subdir(self) -> str:
return f"ffmpeg-{self.version}-{get_arch()}-static"
def get_ffmpeg_path(self) -> str:
- return os.path.join(self.get_installed_dir(), "ffmpeg")
+ return os.path.join(self.get_installed_dir(), "ffmpeg") # type: ignore[arg-type]
def get_ffprobe_path(self) -> str:
- return os.path.join(self.get_installed_dir(), "ffprobe")
+ return os.path.join(self.get_installed_dir(), "ffprobe") # type: ignore[arg-type]
ffmpeg_package = FfmpegPackage()
diff --git a/localstack-core/localstack/packages/java.py b/localstack-core/localstack/packages/java.py
index c37792ffc011a..c8a2e9f7c7f21 100644
--- a/localstack-core/localstack/packages/java.py
+++ b/localstack-core/localstack/packages/java.py
@@ -47,8 +47,11 @@ def get_java_lib_path(self) -> str | None:
if is_mac_os():
return os.path.join(java_home, "lib", "jli", "libjli.dylib")
return os.path.join(java_home, "lib", "server", "libjvm.so")
+ return None
- def get_java_env_vars(self, path: str = None, ld_library_path: str = None) -> dict[str, str]:
+ def get_java_env_vars(
+ self, path: str | None = None, ld_library_path: str | None = None
+ ) -> dict[str, str]:
"""
Returns environment variables pointing to the Java installation. This is useful to build the environment where
the application will run.
@@ -64,16 +67,16 @@ def get_java_env_vars(self, path: str = None, ld_library_path: str = None) -> di
path = path or os.environ["PATH"]
- ld_library_path = ld_library_path or os.environ.get("LD_LIBRARY_PATH")
+ library_path = ld_library_path or os.environ.get("LD_LIBRARY_PATH")
# null paths (e.g. `:/foo`) have a special meaning according to the manpages
- if ld_library_path is None:
- ld_library_path = f"{java_home}/lib:{java_home}/lib/server"
+ if library_path is None:
+ full_library_path = f"{java_home}/lib:{java_home}/lib/server"
else:
- ld_library_path = f"{java_home}/lib:{java_home}/lib/server:{ld_library_path}"
+ full_library_path = f"{java_home}/lib:{java_home}/lib/server:{library_path}"
return {
- "JAVA_HOME": java_home,
- "LD_LIBRARY_PATH": ld_library_path,
+ "JAVA_HOME": java_home, # type: ignore[dict-item]
+ "LD_LIBRARY_PATH": full_library_path,
"PATH": f"{java_bin}:{path}",
}
@@ -144,7 +147,7 @@ def get_java_home(self) -> str | None:
"""
installed_dir = self.get_installed_dir()
if is_mac_os():
- return os.path.join(installed_dir, "Contents", "Home")
+ return os.path.join(installed_dir, "Contents", "Home") # type: ignore[arg-type]
return installed_dir
@property
@@ -188,14 +191,14 @@ def _download_url_fallback(self) -> str:
)
-class JavaPackage(Package):
+class JavaPackage(Package[JavaPackageInstaller]):
def __init__(self, default_version: str = DEFAULT_JAVA_VERSION):
super().__init__(name="Java", default_version=default_version)
def get_versions(self) -> List[str]:
return list(JAVA_VERSIONS.keys())
- def _get_installer(self, version):
+ def _get_installer(self, version: str) -> JavaPackageInstaller:
return JavaPackageInstaller(version)
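
A usage sketch for the typed environment helper, assuming a process should run against the managed JDK:

    import subprocess

    from localstack.packages.java import java_package

    installer = java_package.get_installer()  # default JDK version
    installer.install()
    # env carries JAVA_HOME, LD_LIBRARY_PATH, and a PATH with the JDK bin dir first
    env = installer.get_java_env_vars()
    subprocess.run(["java", "-version"], env=env, check=True)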
diff --git a/localstack-core/localstack/packages/plugins.py b/localstack-core/localstack/packages/plugins.py
index 4b4b200af8e0c..fdeba86a04204 100644
--- a/localstack-core/localstack/packages/plugins.py
+++ b/localstack-core/localstack/packages/plugins.py
@@ -1,22 +1,29 @@
+from typing import TYPE_CHECKING
+
from localstack.packages.api import Package, package
+if TYPE_CHECKING:
+ from localstack.packages.ffmpeg import FfmpegPackageInstaller
+ from localstack.packages.java import JavaPackageInstaller
+ from localstack.packages.terraform import TerraformPackageInstaller
+
@package(name="terraform")
-def terraform_package() -> Package:
+def terraform_package() -> Package["TerraformPackageInstaller"]:
from .terraform import terraform_package
return terraform_package
@package(name="ffmpeg")
-def ffmpeg_package() -> Package:
+def ffmpeg_package() -> Package["FfmpegPackageInstaller"]:
from localstack.packages.ffmpeg import ffmpeg_package
return ffmpeg_package
@package(name="java")
-def java_package() -> Package:
+def java_package() -> Package["JavaPackageInstaller"]:
from localstack.packages.java import java_package
return java_package
diff --git a/localstack-core/localstack/packages/terraform.py b/localstack-core/localstack/packages/terraform.py
index 703380c54c07e..6ee590f0387b5 100644
--- a/localstack-core/localstack/packages/terraform.py
+++ b/localstack-core/localstack/packages/terraform.py
@@ -2,7 +2,7 @@
import platform
from typing import List
-from localstack.packages import InstallTarget, Package, PackageInstaller
+from localstack.packages import InstallTarget, Package
from localstack.packages.core import ArchiveDownloadAndExtractInstaller
from localstack.utils.files import chmod_r
from localstack.utils.platform import get_arch
@@ -13,14 +13,14 @@
)
-class TerraformPackage(Package):
- def __init__(self):
+class TerraformPackage(Package["TerraformPackageInstaller"]):
+ def __init__(self) -> None:
super().__init__("Terraform", TERRAFORM_VERSION)
def get_versions(self) -> List[str]:
return [TERRAFORM_VERSION]
- def _get_installer(self, version: str) -> PackageInstaller:
+ def _get_installer(self, version: str) -> "TerraformPackageInstaller":
return TerraformPackageInstaller("terraform", version)
@@ -35,7 +35,7 @@ def _get_download_url(self) -> str:
def _install(self, target: InstallTarget) -> None:
super()._install(target)
- chmod_r(self.get_executable_path(), 0o777)
+ chmod_r(self.get_executable_path(), 0o777) # type: ignore[arg-type]
terraform_package = TerraformPackage()
diff --git a/localstack-core/localstack/py.typed b/localstack-core/localstack/py.typed
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/runtime/analytics.py b/localstack-core/localstack/runtime/analytics.py
index 6882878dac2ac..2612ee8637bf9 100644
--- a/localstack-core/localstack/runtime/analytics.py
+++ b/localstack-core/localstack/runtime/analytics.py
@@ -85,6 +85,7 @@
"OUTBOUND_HTTP_PROXY",
"OUTBOUND_HTTPS_PROXY",
"S3_DIR",
+ "SFN_MOCK_CONFIG",
"TMPDIR",
]
diff --git a/localstack-core/localstack/services/apigateway/analytics.py b/localstack-core/localstack/services/apigateway/analytics.py
new file mode 100644
index 0000000000000..13bd7109358ce
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/analytics.py
@@ -0,0 +1,5 @@
+from localstack.utils.analytics.metrics import Counter
+
+invocation_counter = Counter(
+ namespace="apigateway", name="rest_api_execute", labels=["invocation_type"]
+)
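
A usage sketch for the counter, assuming the labels().increment() pattern of the metrics API used by the handlers below (the label value is illustrative):

    from localstack.services.apigateway.analytics import invocation_counter

    # each labels(...) call selects one tagged series; increment() records a hit
    invocation_counter.labels(invocation_type="TestInvoke").increment()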
diff --git a/localstack-core/localstack/services/apigateway/exporter.py b/localstack-core/localstack/services/apigateway/exporter.py
index 42614ab4def8f..0706e794c1651 100644
--- a/localstack-core/localstack/services/apigateway/exporter.py
+++ b/localstack-core/localstack/services/apigateway/exporter.py
@@ -190,7 +190,15 @@ def export(
self._add_paths(spec, resources, with_extension)
self._add_models(spec, models["items"], "#/definitions")
- return getattr(spec, self.export_formats.get(export_format))()
+ response = getattr(spec, self.export_formats.get(export_format))()
+ if (
+ with_extension
+ and isinstance(response, dict)
+ and (binary_media_types := rest_api.get("binaryMediaTypes")) is not None
+ ):
+ response[OpenAPIExt.BINARY_MEDIA_TYPES] = binary_media_types
+
+ return response
class _OpenApiOAS30Exporter(_BaseOpenApiExporter):
@@ -298,8 +306,16 @@ def export(
self._add_models(spec, models["items"], "#/components/schemas")
response = getattr(spec, self.export_formats.get(export_format))()
- if isinstance(response, dict) and "components" not in response:
- response["components"] = {}
+ if isinstance(response, dict):
+ if "components" not in response:
+ response["components"] = {}
+
+ if (
+ with_extension
+ and (binary_media_types := rest_api.get("binaryMediaTypes")) is not None
+ ):
+ response[OpenAPIExt.BINARY_MEDIA_TYPES] = binary_media_types
+
return response
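
With extensions enabled, the exported document now carries the API's binary media types under the x-amazon-apigateway extension key; roughly (values are illustrative):

    exported = {
        "openapi": "3.0.1",
        "info": {"title": "my-api", "version": "1.0"},
        "paths": {},
        "components": {},
        # added when with_extension is set and the API defines binaryMediaTypes
        "x-amazon-apigateway-binary-media-types": ["image/png", "application/octet-stream"],
    }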
diff --git a/localstack-core/localstack/services/apigateway/helpers.py b/localstack-core/localstack/services/apigateway/helpers.py
index cde25c4bdaba2..6cb103d50f637 100644
--- a/localstack-core/localstack/services/apigateway/helpers.py
+++ b/localstack-core/localstack/services/apigateway/helpers.py
@@ -3,7 +3,6 @@
import hashlib
import json
import logging
-from datetime import datetime
from typing import List, Optional, TypedDict, Union
from urllib import parse as urlparse
@@ -61,7 +60,6 @@
{formatted_date} : Method completed with status: {status_code}
"""
-
EMPTY_MODEL = "Empty"
ERROR_MODEL = "Error"
@@ -494,8 +492,10 @@ def import_api_from_openapi_spec(
region_name = context.region
# TODO:
- # 1. validate the "mode" property of the spec document, "merge" or "overwrite"
+ # 1. validate the "mode" property of the spec document, "merge" or "overwrite", and properly apply it
+    #    for now, the mode is only applied to the binaryMediaTypes
# 2. validate the document type, "swagger" or "openapi"
+ mode = request.get("mode", "merge")
rest_api.version = (
str(version) if (version := resolved_schema.get("info", {}).get("version")) else None
@@ -950,7 +950,14 @@ def create_method_resource(child, method, method_schema):
get_or_create_path(base_path + path, base_path=base_path)
# binary types
- rest_api.binaryMediaTypes = resolved_schema.get(OpenAPIExt.BINARY_MEDIA_TYPES, [])
+ if mode == "merge":
+ existing_binary_media_types = rest_api.binaryMediaTypes or []
+ else:
+ existing_binary_media_types = []
+
+ rest_api.binaryMediaTypes = existing_binary_media_types + resolved_schema.get(
+ OpenAPIExt.BINARY_MEDIA_TYPES, []
+ )
policy = resolved_schema.get(OpenAPIExt.POLICY)
if policy:
@@ -984,35 +991,6 @@ def is_variable_path(path_part: str) -> bool:
return path_part.startswith("{") and path_part.endswith("}")
-def log_template(
- request_id: str,
- date: datetime,
- http_method: str,
- resource_path: str,
- request_path: str,
- query_string: str,
- request_headers: str,
- request_body: str,
- response_body: str,
- response_headers: str,
- status_code: str,
-):
- formatted_date = date.strftime("%a %b %d %H:%M:%S %Z %Y")
- return INVOKE_TEST_LOG_TEMPLATE.format(
- request_id=request_id,
- formatted_date=formatted_date,
- http_method=http_method,
- resource_path=resource_path,
- request_path=request_path,
- query_string=query_string,
- request_headers=request_headers,
- request_body=request_body,
- response_body=response_body,
- response_headers=response_headers,
- status_code=status_code,
- )
-
-
def get_domain_name_hash(domain_name: str) -> str:
"""
    Return a hash of the given domain name, which helps construct regional domain names for APIs.
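
The merge-vs-overwrite handling above reduces to list concatenation; a worked sketch (values are illustrative):

    def apply_binary_media_types(mode: str, existing: list, imported: list) -> list:
        # "merge" keeps what the API already had; "overwrite" starts from scratch
        base = existing if mode == "merge" else []
        return base + imported

    existing = ["image/png"]                 # already on the REST API
    imported = ["application/octet-stream"]  # from the imported spec

    assert apply_binary_media_types("merge", existing, imported) == [
        "image/png",
        "application/octet-stream",
    ]
    assert apply_binary_media_types("overwrite", existing, imported) == [
        "application/octet-stream"
    ]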
diff --git a/localstack-core/localstack/services/apigateway/legacy/provider.py b/localstack-core/localstack/services/apigateway/legacy/provider.py
index 996e9d170dc1a..ecdab2873a7bd 100644
--- a/localstack-core/localstack/services/apigateway/legacy/provider.py
+++ b/localstack-core/localstack/services/apigateway/legacy/provider.py
@@ -98,6 +98,7 @@
from localstack.services.apigateway.helpers import (
EMPTY_MODEL,
ERROR_MODEL,
+ INVOKE_TEST_LOG_TEMPLATE,
OpenAPIExt,
apply_json_patch_safe,
get_apigateway_store,
@@ -108,7 +109,6 @@
import_api_from_openapi_spec,
is_greedy_path,
is_variable_path,
- log_template,
resolve_references,
)
from localstack.services.apigateway.legacy.helpers import multi_value_dict_for_list
@@ -217,9 +217,10 @@ def test_invoke_method(
# TODO: add the missing fields to the log. Next iteration will add helpers to extract the missing fields
# from the apicontext
- log = log_template(
+ formatted_date = req_start_time.strftime("%a %b %d %H:%M:%S %Z %Y")
+ log = INVOKE_TEST_LOG_TEMPLATE.format(
request_id=invocation_context.context["requestId"],
- date=req_start_time,
+ formatted_date=formatted_date,
http_method=invocation_context.method,
resource_path=invocation_context.invocation_path,
request_path="",
@@ -230,6 +231,7 @@ def test_invoke_method(
response_headers=result.headers,
status_code=result.status_code,
)
+
return TestInvokeMethodResponse(
status=result.status_code,
headers=dict(result.headers),
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py
index 5e4a8a27f97b4..e9e1dcb618166 100644
--- a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py
@@ -1,5 +1,7 @@
from rolo.gateway import CompositeHandler
+from localstack.services.apigateway.analytics import invocation_counter
+
from .analytics import IntegrationUsageCounter
from .api_key_validation import ApiKeyValidationHandler
from .gateway_exception import GatewayExceptionHandler
@@ -24,4 +26,4 @@
gateway_exception_handler = GatewayExceptionHandler()
api_key_validation_handler = ApiKeyValidationHandler()
response_enricher = InvocationResponseEnricher()
-usage_counter = IntegrationUsageCounter()
+usage_counter = IntegrationUsageCounter(counter=invocation_counter)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py
index 82ba2b7d2593c..7c6525eb0e7e1 100644
--- a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py
@@ -1,7 +1,7 @@
import logging
from localstack.http import Response
-from localstack.utils.analytics.metrics import Counter, LabeledCounterMetric
+from localstack.utils.analytics.metrics import LabeledCounterMetric
from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
from ..context import RestApiInvocationContext
@@ -12,10 +12,8 @@
class IntegrationUsageCounter(RestApiGatewayHandler):
counter: LabeledCounterMetric
- def __init__(self, counter: LabeledCounterMetric = None):
- self.counter = counter or Counter(
- namespace="apigateway", name="rest_api_execute", labels=["invocation_type"]
- )
+ def __init__(self, counter: LabeledCounterMetric):
+ self.counter = counter
def __call__(
self,
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py
index 7f6ae374afdac..02d09db8332c1 100644
--- a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py
@@ -69,7 +69,7 @@ def __call__(
# we first need to find the right IntegrationResponse based on their selection template, linked to the status
# code of the Response
if integration_type == IntegrationType.AWS and "lambda:path/" in integration["uri"]:
- selection_value = self.parse_error_message_from_lambda(body) or str(status_code)
+ selection_value = self.parse_error_message_from_lambda(body)
else:
selection_value = str(status_code)
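
The selection value feeds the integration-response lookup, where non-default selectionPattern regexes are tried before falling back to the default response; a simplified sketch of that matching (data structures are illustrative, not the provider's internals):

    import re

    integration_responses = {
        "200": {"selectionPattern": None},  # default response
        "500": {"selectionPattern": ".*Task timed out.*"},
    }

    def select_status(selection_value: str) -> str:
        for status_code, response in integration_responses.items():
            pattern = response["selectionPattern"]
            if pattern and re.match(pattern, selection_value):
                return status_code
        return "200"

    # a Lambda errorMessage now drives the selection for AWS lambda:path integrations
    assert select_status("2024-01-01T00:00:00Z Task timed out after 3.00 seconds") == "500"
    assert select_status("") == "200"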
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
index 7e84967df5004..93f509b8aed88 100644
--- a/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
@@ -124,7 +124,7 @@ def __init__(self, router: Router[Handler] = None, handler: ApiGatewayEndpoint =
def register_routes(self) -> None:
LOG.debug("Registering API Gateway routes.")
- host_pattern = ".execute-api."
+ host_pattern = ".execute-api."
deprecated_route_endpoint = deprecated_endpoint(
endpoint=self.handler,
previous_path="/restapis///_user_request_",
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py
new file mode 100644
index 0000000000000..4ed1a4c0db845
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py
@@ -0,0 +1,206 @@
+import datetime
+from urllib.parse import parse_qs
+
+from rolo import Request
+from rolo.gateway.chain import HandlerChain
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import TestInvokeMethodRequest, TestInvokeMethodResponse
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Response
+from localstack.utils.strings import to_bytes, to_str
+
+from ...models import RestApiDeployment
+from . import handlers
+from .context import InvocationRequest, RestApiInvocationContext
+from .handlers.resource_router import RestAPIResourceRouter
+from .header_utils import build_multi_value_headers
+from .template_mapping import dict_to_string
+
+# TODO: we probably need to write and populate those logs as part of the handler chain itself
+# and store them in the InvocationContext. That way, we could also retrieve them when calling TestInvoke
+
+TEST_INVOKE_TEMPLATE = """Execution log for request {request_id}
+{formatted_date} : Starting execution for request: {request_id}
+{formatted_date} : HTTP Method: {request_method}, Resource Path: {resource_path}
+{formatted_date} : Method request path: {method_request_path_parameters}
+{formatted_date} : Method request query string: {method_request_query_string}
+{formatted_date} : Method request headers: {method_request_headers}
+{formatted_date} : Method request body before transformations: {method_request_body}
+{formatted_date} : Endpoint request URI: {endpoint_uri}
+{formatted_date} : Endpoint request headers: {endpoint_request_headers}
+{formatted_date} : Endpoint request body after transformations: {endpoint_request_body}
+{formatted_date} : Sending request to {endpoint_uri}
+{formatted_date} : Received response. Status: {endpoint_response_status_code}, Integration latency: {endpoint_response_latency} ms
+{formatted_date} : Endpoint response headers: {endpoint_response_headers}
+{formatted_date} : Endpoint response body before transformations: {endpoint_response_body}
+{formatted_date} : Method response body after transformations: {method_response_body}
+{formatted_date} : Method response headers: {method_response_headers}
+{formatted_date} : Successfully completed execution
+{formatted_date} : Method completed with status: {method_response_status}
+"""
+
+
+def _dump_headers(headers: Headers) -> str:
+ if not headers:
+ return "{}"
+ multi_headers = {key: ",".join(headers.getlist(key)) for key in headers.keys()}
+ string_headers = dict_to_string(multi_headers)
+ if len(string_headers) > 998:
+ return f"{string_headers[:998]} [TRUNCATED]"
+
+ return string_headers
+
+
+def log_template(invocation_context: RestApiInvocationContext, response_headers: Headers) -> str:
+    # TODO: funnily enough, for the `endpoint_response_headers` in AWS_PROXY integrations, AWS logs the response
+    # headers from the Lambda HTTP invoke call even though we use the headers from the lambda response itself
+ formatted_date = datetime.datetime.now(tz=datetime.UTC).strftime("%a %b %d %H:%M:%S %Z %Y")
+ request = invocation_context.invocation_request
+ context_var = invocation_context.context_variables
+ integration_req = invocation_context.integration_request
+ endpoint_resp = invocation_context.endpoint_response
+ method_resp = invocation_context.invocation_response
+ # TODO: if endpoint_uri is an ARN, it means it's an AWS_PROXY integration
+ # this should be transformed to the true URL of a lambda invoke call
+ endpoint_uri = integration_req.get("uri", "")
+
+ return TEST_INVOKE_TEMPLATE.format(
+ formatted_date=formatted_date,
+ request_id=context_var["requestId"],
+ resource_path=request["path"],
+ request_method=request["http_method"],
+ method_request_path_parameters=dict_to_string(request["path_parameters"]),
+ method_request_query_string=dict_to_string(request["query_string_parameters"]),
+ method_request_headers=_dump_headers(request.get("headers")),
+ method_request_body=to_str(request.get("body", "")),
+ endpoint_uri=endpoint_uri,
+ endpoint_request_headers=_dump_headers(integration_req.get("headers")),
+ endpoint_request_body=to_str(integration_req.get("body", "")),
+ # TODO: measure integration latency
+ endpoint_response_latency=150,
+ endpoint_response_status_code=endpoint_resp.get("status_code"),
+ endpoint_response_body=to_str(endpoint_resp.get("body", "")),
+ endpoint_response_headers=_dump_headers(endpoint_resp.get("headers")),
+ method_response_status=method_resp.get("status_code"),
+ method_response_body=to_str(method_resp.get("body", "")),
+ method_response_headers=_dump_headers(response_headers),
+ )
+
+
+def create_test_chain() -> HandlerChain[RestApiInvocationContext]:
+ return HandlerChain(
+ request_handlers=[
+ handlers.method_request_handler,
+ handlers.integration_request_handler,
+ handlers.integration_handler,
+ handlers.integration_response_handler,
+ handlers.method_response_handler,
+ ],
+ exception_handlers=[
+ handlers.gateway_exception_handler,
+ ],
+ )
+
+
+def create_test_invocation_context(
+ test_request: TestInvokeMethodRequest,
+ deployment: RestApiDeployment,
+) -> RestApiInvocationContext:
+ parse_handler = handlers.parse_request
+ http_method = test_request["httpMethod"]
+
+ # we do not need a true HTTP request for the context, as we are skipping all the parsing steps and using the
+ # provider data
+ invocation_context = RestApiInvocationContext(
+ request=Request(method=http_method),
+ )
+ path_query = test_request.get("pathWithQueryString", "/").split("?")
+ path = path_query[0]
+ multi_query_args: dict[str, list[str]] = {}
+
+ if len(path_query) > 1:
+ multi_query_args = parse_qs(path_query[1])
+
+ # for the single value parameters, AWS only keeps the last value of the list
+ single_query_args = {k: v[-1] for k, v in multi_query_args.items()}
+
+ invocation_request = InvocationRequest(
+ http_method=http_method,
+ path=path,
+ raw_path=path,
+ query_string_parameters=single_query_args,
+ multi_value_query_string_parameters=multi_query_args,
+ headers=Headers(test_request.get("headers")),
+ # TODO: handle multiValueHeaders
+ body=to_bytes(test_request.get("body") or ""),
+ )
+ invocation_context.invocation_request = invocation_request
+
+ _, path_parameters = RestAPIResourceRouter(deployment).match(invocation_context)
+ invocation_request["path_parameters"] = path_parameters
+
+ invocation_context.deployment = deployment
+ invocation_context.api_id = test_request["restApiId"]
+ invocation_context.stage = None
+ invocation_context.deployment_id = ""
+ invocation_context.account_id = deployment.account_id
+ invocation_context.region = deployment.region
+ invocation_context.stage_variables = test_request.get("stageVariables", {})
+ invocation_context.context_variables = parse_handler.create_context_variables(
+ invocation_context
+ )
+ invocation_context.trace_id = parse_handler.populate_trace_id({})
+
+ resource = deployment.rest_api.resources[test_request["resourceId"]]
+ resource_method = resource["resourceMethods"][http_method]
+ invocation_context.resource = resource
+ invocation_context.resource_method = resource_method
+ invocation_context.integration = resource_method["methodIntegration"]
+ handlers.route_request.update_context_variables_with_resource(
+ invocation_context.context_variables, resource
+ )
+
+ return invocation_context
+
+
+def run_test_invocation(
+ test_request: TestInvokeMethodRequest, deployment: RestApiDeployment
+) -> TestInvokeMethodResponse:
+ # validate resource exists in deployment
+ invocation_context = create_test_invocation_context(test_request, deployment)
+
+ test_chain = create_test_chain()
+ # header order is important
+ if invocation_context.integration["type"] == "MOCK":
+ base_headers = {"Content-Type": APPLICATION_JSON}
+ else:
+        # we manually add the trace-id, as it is normally added by handlers.response_enricher, which adds too much data
+        # for the TestInvoke. It needs to be first
+ base_headers = {
+ "X-Amzn-Trace-Id": invocation_context.trace_id,
+ "Content-Type": APPLICATION_JSON,
+ }
+
+ test_response = Response(headers=base_headers)
+ start_time = datetime.datetime.now()
+ test_chain.handle(context=invocation_context, response=test_response)
+ end_time = datetime.datetime.now()
+
+ response_headers = test_response.headers.copy()
+ # AWS does not return the Content-Length for TestInvokeMethod
+ response_headers.remove("Content-Length")
+
+ log = log_template(invocation_context, response_headers)
+
+ headers = dict(response_headers)
+ multi_value_headers = build_multi_value_headers(response_headers)
+
+ return TestInvokeMethodResponse(
+ log=log,
+ status=test_response.status_code,
+ body=test_response.get_data(as_text=True),
+ headers=headers,
+ multiValueHeaders=multi_value_headers,
+ latency=int((end_time - start_time).total_seconds()),
+ )
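
The query-string handling above mirrors AWS's single-value view, which keeps only the last value per key; concretely:

    from urllib.parse import parse_qs

    path_query = "/pets?type=dog&type=cat&limit=5".split("?")
    multi = parse_qs(path_query[1])  # {'type': ['dog', 'cat'], 'limit': ['5']}
    single = {k: v[-1] for k, v in multi.items()}
    assert single == {"type": "cat", "limit": "5"}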
diff --git a/localstack-core/localstack/services/apigateway/next_gen/provider.py b/localstack-core/localstack/services/apigateway/next_gen/provider.py
index 9361e08ae94fd..9c3dab33bfe86 100644
--- a/localstack-core/localstack/services/apigateway/next_gen/provider.py
+++ b/localstack-core/localstack/services/apigateway/next_gen/provider.py
@@ -37,6 +37,7 @@
)
from .execute_api.helpers import freeze_rest_api
from .execute_api.router import ApiGatewayEndpoint, ApiGatewayRouter
+from .execute_api.test_invoke import run_test_invocation
class ApigatewayNextGenProvider(ApigatewayProvider):
@@ -242,8 +243,28 @@ def get_gateway_responses(
def test_invoke_method(
self, context: RequestContext, request: TestInvokeMethodRequest
) -> TestInvokeMethodResponse:
- # TODO: rewrite and migrate to NextGen
- return super().test_invoke_method(context, request)
+ rest_api_id = request["restApiId"]
+ moto_rest_api = get_moto_rest_api(context=context, rest_api_id=rest_api_id)
+ resource = moto_rest_api.resources.get(request["resourceId"])
+ if not resource:
+ raise NotFoundException("Invalid Resource identifier specified")
+
+ # test httpMethod
+
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+ frozen_deployment = freeze_rest_api(
+ account_id=context.account_id,
+ region=context.region,
+ moto_rest_api=moto_rest_api,
+ localstack_rest_api=rest_api_container,
+ )
+
+ response = run_test_invocation(
+ test_request=request,
+ deployment=frozen_deployment,
+ )
+
+ return response
def _get_gateway_response_or_default(
diff --git a/localstack-core/localstack/services/cloudformation/api_utils.py b/localstack-core/localstack/services/cloudformation/api_utils.py
index 556435ed699a7..c4172974cec35 100644
--- a/localstack-core/localstack/services/cloudformation/api_utils.py
+++ b/localstack-core/localstack/services/cloudformation/api_utils.py
@@ -4,6 +4,7 @@
from localstack import config, constants
from localstack.aws.connect import connect_to
+from localstack.services.cloudformation.engine.validations import ValidationError
from localstack.services.s3.utils import (
extract_bucket_name_and_key_from_headers_and_path,
normalize_bucket_name,
@@ -32,6 +33,61 @@ def prepare_template_body(req_data: dict) -> str | bytes | None: # TODO: mutati
return modified_template_body
+def extract_template_body(request: dict) -> str:
+ """
+ Given a request payload, fetch the body of the template either from S3 or from the payload itself
+ """
+ if template_body := request.get("TemplateBody"):
+ if request.get("TemplateURL"):
+ raise ValidationError(
+ "Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
+ ) # TODO: check proper message
+
+ return template_body
+
+ elif template_url := request.get("TemplateURL"):
+ template_url = convert_s3_to_local_url(template_url)
+ return get_remote_template_body(template_url)
+
+ else:
+ raise ValidationError(
+ "Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
+ ) # TODO: check proper message
+
+
+def get_remote_template_body(url: str) -> str:
+ response = run_safe(lambda: safe_requests.get(url, verify=False))
+ # check error codes, and code 301 - fixes https://github.com/localstack/localstack/issues/1884
+ status_code = 0 if response is None else response.status_code
+ if 200 <= status_code < 300:
+ # request was ok
+ return response.text
+ elif response is None or status_code == 301 or status_code >= 400:
+ # check if this is an S3 URL, then get the file directly from there
+ url = convert_s3_to_local_url(url)
+ if is_local_service_url(url):
+ parsed_path = urlparse(url).path.lstrip("/")
+ parts = parsed_path.partition("/")
+ client = connect_to().s3
+ LOG.debug(
+ "Download CloudFormation template content from local S3: %s - %s",
+ parts[0],
+ parts[2],
+ )
+ result = client.get_object(Bucket=parts[0], Key=parts[2])
+ body = to_str(result["Body"].read())
+ return body
+ raise RuntimeError(
+ "Unable to fetch template body (code %s) from URL %s" % (status_code, url)
+ )
+ else:
+ raise RuntimeError(
+ f"Bad status code from fetching template from url '{url}' ({status_code})",
+ url,
+ status_code,
+ )
+
+
def get_template_body(req_data: dict) -> str:
body = req_data.get("TemplateBody")
if body:
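
extract_template_body enforces that exactly one template source is provided; a behavioural sketch without the S3 resolution path (function name is illustrative):

    def resolve_template_source(request: dict) -> str:
        body = request.get("TemplateBody")
        url = request.get("TemplateURL")
        if body and url:
            raise ValueError("Specify exactly one of 'TemplateBody' or 'TemplateUrl'")
        if body:
            return "inline"
        if url:
            return "from-url"
        raise ValueError("Specify exactly one of 'TemplateBody' or 'TemplateUrl'")

    assert resolve_template_source({"TemplateBody": "{}"}) == "inline"
    assert resolve_template_source({"TemplateURL": "https://example.com/t.yaml"}) == "from-url"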
diff --git a/localstack-core/localstack/services/cloudformation/engine/entities.py b/localstack-core/localstack/services/cloudformation/engine/entities.py
index cd2a2517432fd..d9f07f0281e0b 100644
--- a/localstack-core/localstack/services/cloudformation/engine/entities.py
+++ b/localstack-core/localstack/services/cloudformation/engine/entities.py
@@ -49,7 +49,7 @@ def __init__(self, metadata: dict):
self.stack = None
-class StackMetadata(TypedDict):
+class CreateChangeSetInput(TypedDict):
StackName: str
Capabilities: list[Capability]
ChangeSetName: Optional[str]
@@ -83,7 +83,7 @@ def __init__(
self,
account_id: str,
region_name: str,
- metadata: Optional[StackMetadata] = None,
+ metadata: Optional[CreateChangeSetInput] = None,
template: Optional[StackTemplate] = None,
template_body: Optional[str] = None,
):
@@ -297,6 +297,10 @@ def resources(self):
"""Return dict of resources"""
return dict(self.template_resources)
+ @resources.setter
+ def resources(self, resources: dict):
+ self.template["Resources"] = resources
+
@property
def template_resources(self):
return self.template.setdefault("Resources", {})
@@ -370,8 +374,17 @@ def copy(self):
# TODO: what functionality of the Stack object do we rely on here?
class StackChangeSet(Stack):
update_graph: NodeTemplate | None
+ change_set_type: ChangeSetType | None
- def __init__(self, account_id: str, region_name: str, stack: Stack, params=None, template=None):
+ def __init__(
+ self,
+ account_id: str,
+ region_name: str,
+ stack: Stack,
+ params=None,
+ template=None,
+ change_set_type: ChangeSetType | None = None,
+ ):
if template is None:
template = {}
if params is None:
@@ -389,6 +402,7 @@ def __init__(self, account_id: str, region_name: str, stack: Stack, params=None,
self.stack = stack
self.metadata["StackId"] = stack.stack_id
self.metadata["Status"] = "CREATE_PENDING"
+ self.change_set_type = change_set_type
@property
def change_set_id(self):
@@ -408,9 +422,17 @@ def changes(self):
return result
# V2 only
- def populate_update_graph(self, before_template: dict | None, after_template: dict | None):
+ def populate_update_graph(
+ self,
+ before_template: Optional[dict],
+ after_template: Optional[dict],
+ before_parameters: Optional[dict],
+ after_parameters: Optional[dict],
+ ) -> None:
change_set_model = ChangeSetModel(
before_template=before_template,
after_template=after_template,
+ before_parameters=before_parameters,
+ after_parameters=after_parameters,
)
self.update_graph = change_set_model.get_update_model()
diff --git a/localstack-core/localstack/services/cloudformation/engine/quirks.py b/localstack-core/localstack/services/cloudformation/engine/quirks.py
index b38056474b560..964d5b603d960 100644
--- a/localstack-core/localstack/services/cloudformation/engine/quirks.py
+++ b/localstack-core/localstack/services/cloudformation/engine/quirks.py
@@ -30,6 +30,9 @@
"AWS::Logs::SubscriptionFilter": "/properties/LogGroupName",
"AWS::RDS::DBProxyTargetGroup": "/properties/TargetGroupName",
"AWS::Glue::SchemaVersionMetadata": "||", # composite
+ "AWS::VerifiedPermissions::IdentitySource": "|", # composite
+ "AWS::VerifiedPermissions::Policy": "|", # composite
+ "AWS::VerifiedPermissions::PolicyTemplate": "|", # composite
"AWS::WAFv2::WebACL": "||",
"AWS::WAFv2::WebACLAssociation": "|",
"AWS::WAFv2::IPSet": "||",
diff --git a/localstack-core/localstack/services/cloudformation/engine/template_deployer.py b/localstack-core/localstack/services/cloudformation/engine/template_deployer.py
index 5bfcf02c5453a..a0ae9c286d61c 100644
--- a/localstack-core/localstack/services/cloudformation/engine/template_deployer.py
+++ b/localstack-core/localstack/services/cloudformation/engine/template_deployer.py
@@ -1409,15 +1409,6 @@ def delete_stack(self):
) # TODO: why is there a fallback?
resource["ResourceType"] = get_resource_type(resource)
- def _safe_lookup_is_deleted(r_id):
- """handles the case where self.stack.resource_status(..) fails for whatever reason"""
- try:
- return self.stack.resource_status(r_id).get("ResourceStatus") == "DELETE_COMPLETE"
- except Exception:
- if config.CFN_VERBOSE_ERRORS:
- LOG.exception("failed to lookup if resource %s is deleted", r_id)
- return True # just an assumption
-
ordered_resource_ids = list(
order_resources(
resources=original_resources,
diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py
index 7601cd0566773..f8adc872cbc2a 100644
--- a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py
+++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py
@@ -113,22 +113,28 @@ class ChangeSetTerminal(ChangeSetEntity, abc.ABC): ...
class NodeTemplate(ChangeSetNode):
+ mappings: Final[NodeMappings]
parameters: Final[NodeParameters]
conditions: Final[NodeConditions]
resources: Final[NodeResources]
+ outputs: Final[NodeOutputs]
def __init__(
self,
scope: Scope,
change_type: ChangeType,
+ mappings: NodeMappings,
parameters: NodeParameters,
conditions: NodeConditions,
resources: NodeResources,
+ outputs: NodeOutputs,
):
super().__init__(scope=scope, change_type=change_type)
+ self.mappings = mappings
self.parameters = parameters
self.conditions = conditions
self.resources = resources
+ self.outputs = outputs
class NodeDivergence(ChangeSetNode):
@@ -143,21 +149,24 @@ def __init__(self, scope: Scope, value: ChangeSetEntity, divergence: ChangeSetEn
class NodeParameter(ChangeSetNode):
name: Final[str]
- value: Final[ChangeSetEntity]
+ type_: Final[ChangeSetEntity]
dynamic_value: Final[ChangeSetEntity]
+ default_value: Final[Optional[ChangeSetEntity]]
def __init__(
self,
scope: Scope,
change_type: ChangeType,
name: str,
- value: ChangeSetEntity,
+ type_: ChangeSetEntity,
dynamic_value: ChangeSetEntity,
+ default_value: Optional[ChangeSetEntity],
):
super().__init__(scope=scope, change_type=change_type)
self.name = name
- self.value = value
+ self.type_ = type_
self.dynamic_value = dynamic_value
+ self.default_value = default_value
class NodeParameters(ChangeSetNode):
@@ -168,6 +177,54 @@ def __init__(self, scope: Scope, change_type: ChangeType, parameters: list[NodeP
self.parameters = parameters
+class NodeMapping(ChangeSetNode):
+ name: Final[str]
+ bindings: Final[NodeObject]
+
+ def __init__(self, scope: Scope, change_type: ChangeType, name: str, bindings: NodeObject):
+ super().__init__(scope=scope, change_type=change_type)
+ self.name = name
+ self.bindings = bindings
+
+
+class NodeMappings(ChangeSetNode):
+ mappings: Final[list[NodeMapping]]
+
+ def __init__(self, scope: Scope, change_type: ChangeType, mappings: list[NodeMapping]):
+ super().__init__(scope=scope, change_type=change_type)
+ self.mappings = mappings
+
+
+class NodeOutput(ChangeSetNode):
+ name: Final[str]
+ value: Final[ChangeSetEntity]
+ export: Final[Optional[ChangeSetEntity]]
+ condition_reference: Final[Optional[TerminalValue]]
+
+ def __init__(
+ self,
+ scope: Scope,
+ change_type: ChangeType,
+ name: str,
+ value: ChangeSetEntity,
+ export: Optional[ChangeSetEntity],
+        condition_reference: Optional[TerminalValue],
+ ):
+ super().__init__(scope=scope, change_type=change_type)
+ self.name = name
+ self.value = value
+ self.export = export
+        self.condition_reference = condition_reference
+
+
+class NodeOutputs(ChangeSetNode):
+ outputs: Final[list[NodeOutput]]
+
+ def __init__(self, scope: Scope, change_type: ChangeType, outputs: list[NodeOutput]):
+ super().__init__(scope=scope, change_type=change_type)
+ self.outputs = outputs
+
+
class NodeCondition(ChangeSetNode):
name: Final[str]
body: Final[ChangeSetEntity]
@@ -197,7 +254,7 @@ def __init__(self, scope: Scope, change_type: ChangeType, resources: list[NodeRe
class NodeResource(ChangeSetNode):
name: Final[str]
type_: Final[ChangeSetTerminal]
- condition_reference: Final[TerminalValue]
+ condition_reference: Final[Optional[TerminalValue]]
properties: Final[NodeProperties]
def __init__(
@@ -300,16 +357,29 @@ def __init__(self, scope: Scope, value: Any):
TypeKey: Final[str] = "Type"
ConditionKey: Final[str] = "Condition"
ConditionsKey: Final[str] = "Conditions"
+MappingsKey: Final[str] = "Mappings"
ResourcesKey: Final[str] = "Resources"
PropertiesKey: Final[str] = "Properties"
ParametersKey: Final[str] = "Parameters"
+DefaultKey: Final[str] = "Default"
+ValueKey: Final[str] = "Value"
+ExportKey: Final[str] = "Export"
+OutputsKey: Final[str] = "Outputs"
# TODO: expand intrinsic functions set.
RefKey: Final[str] = "Ref"
FnIf: Final[str] = "Fn::If"
FnNot: Final[str] = "Fn::Not"
FnGetAttKey: Final[str] = "Fn::GetAtt"
FnEqualsKey: Final[str] = "Fn::Equals"
-INTRINSIC_FUNCTIONS: Final[set[str]] = {RefKey, FnIf, FnNot, FnEqualsKey, FnGetAttKey}
+FnFindInMapKey: Final[str] = "Fn::FindInMap"
+INTRINSIC_FUNCTIONS: Final[set[str]] = {
+ RefKey,
+ FnIf,
+ FnNot,
+ FnEqualsKey,
+ FnGetAttKey,
+ FnFindInMapKey,
+}
class ChangeSetModel:
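For orientation, a minimal illustrative sketch (not part of the patch) of how single-key objects are classified as intrinsic-function invocations, mirroring the _name_if_intrinsic_function helper added further down in this file:

from typing import Any, Optional

INTRINSIC_FUNCTIONS = {"Ref", "Fn::If", "Fn::Not", "Fn::Equals", "Fn::GetAtt", "Fn::FindInMap"}

def name_if_intrinsic_function(value: Any) -> Optional[str]:
    # A dict is an intrinsic invocation only if its single key is a known function name.
    if isinstance(value, dict) and len(value) == 1:
        (key_name,) = value.keys()
        if key_name in INTRINSIC_FUNCTIONS:
            return key_name
    return None

assert name_if_intrinsic_function({"Ref": "MyParameter"}) == "Ref"
assert name_if_intrinsic_function({"Fn::FindInMap": ["M", "T", "S"]}) == "Fn::FindInMap"
assert name_if_intrinsic_function({"Ref": "X", "Extra": 1}) is None  # plain object
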
@@ -449,12 +519,42 @@ def _resolve_intrinsic_function_ref(self, arguments: ChangeSetEntity) -> ChangeT
node_parameter = self._retrieve_parameter_if_exists(parameter_name=logical_id)
if isinstance(node_parameter, NodeParameter):
- return node_parameter.dynamic_value.change_type
+ return node_parameter.change_type
# TODO: this should check the replacement flag for a resource update.
node_resource = self._retrieve_or_visit_resource(resource_name=logical_id)
return node_resource.change_type
+ def _resolve_intrinsic_function_fn_find_in_map(self, arguments: ChangeSetEntity) -> ChangeType:
+ if arguments.change_type != ChangeType.UNCHANGED:
+ return arguments.change_type
+ # TODO: validate arguments structure and type.
+        # TODO: add support for nested functions; for now we assume the arguments are string literals.
+
+ if not isinstance(arguments, NodeArray) or not arguments.array:
+ raise RuntimeError()
+ argument_mapping_name = arguments.array[0]
+ if not isinstance(argument_mapping_name, TerminalValue):
+ raise NotImplementedError()
+ argument_top_level_key = arguments.array[1]
+ if not isinstance(argument_top_level_key, TerminalValue):
+ raise NotImplementedError()
+ argument_second_level_key = arguments.array[2]
+ if not isinstance(argument_second_level_key, TerminalValue):
+ raise NotImplementedError()
+ mapping_name = argument_mapping_name.value
+ top_level_key = argument_top_level_key.value
+ second_level_key = argument_second_level_key.value
+
+ node_mapping = self._retrieve_mapping(mapping_name=mapping_name)
+ # TODO: a lookup would be beneficial in this scenario too;
+ # consider implications downstream and for replication.
+ top_level_object = node_mapping.bindings.bindings.get(top_level_key)
+ if not isinstance(top_level_object, NodeObject):
+ raise RuntimeError()
+ target_map_value = top_level_object.bindings.get(second_level_key)
+ return target_map_value.change_type
+
def _resolve_intrinsic_function_fn_if(self, arguments: ChangeSetEntity) -> ChangeType:
# TODO: validate arguments structure and type.
if not isinstance(arguments, NodeArray) or not arguments.array:
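As the TODOs above note, the Fn::FindInMap resolver currently assumes three string-literal arguments. An illustrative sketch (not part of the patch; names and values are hypothetical) of the raw-template lookup it mirrors:

template = {
    "Mappings": {
        "RegionMap": {
            "us-east-1": {"Ami": "ami-123"},
            "eu-west-1": {"Ami": "ami-456"},
        }
    }
}
mapping_name, top_level_key, second_level_key = "RegionMap", "us-east-1", "Ami"
top_level_object = template["Mappings"][mapping_name][top_level_key]
assert top_level_object[second_level_key] == "ami-123"
# The model returns the change type of the resolved leaf ("ami-123") as the
# change type of the whole Fn::FindInMap expression.
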
@@ -478,19 +578,16 @@ def _resolve_intrinsic_function_fn_if(self, arguments: ChangeSetEntity) -> Chang
def _visit_array(
self, scope: Scope, before_array: Maybe[list], after_array: Maybe[list]
) -> NodeArray:
- change_type = ChangeType.UNCHANGED
array: list[ChangeSetEntity] = list()
for index, (before_value, after_value) in enumerate(
zip_longest(before_array, after_array, fillvalue=Nothing)
):
- # TODO: should extract this scoping logic.
value_scope = scope.open_index(index=index)
value = self._visit_value(
scope=value_scope, before_value=before_value, after_value=after_value
)
array.append(value)
- if value.change_type != ChangeType.UNCHANGED:
- change_type = ChangeType.MODIFIED
+ change_type = self._change_type_for_parent_of([value.change_type for value in array])
return NodeArray(scope=scope, change_type=change_type, array=array)
def _visit_object(
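_visit_array now folds the child change types through _change_type_for_parent_of instead of tracking a local flag. A sketch of one plausible folding, assuming a parent adopts a uniform child change type and degrades to MODIFIED as soon as children disagree (illustrative only; the authoritative logic lives in _change_type_for_parent_of):

from enum import Enum

class ChangeType(Enum):
    UNCHANGED = "Unchanged"
    CREATED = "Created"
    REMOVED = "Removed"
    MODIFIED = "Modified"

def change_type_for_parent_of(change_types: list) -> ChangeType:
    # Sketch: adopt the first changed child's type; disagreement means MODIFIED.
    parent = ChangeType.UNCHANGED
    for change_type in change_types:
        if change_type == ChangeType.UNCHANGED:
            continue
        if parent == ChangeType.UNCHANGED:
            parent = change_type
        elif parent != change_type:
            parent = ChangeType.MODIFIED
            break
    return parent

assert change_type_for_parent_of([ChangeType.UNCHANGED, ChangeType.UNCHANGED]) == ChangeType.UNCHANGED
assert change_type_for_parent_of([ChangeType.CREATED, ChangeType.REMOVED]) == ChangeType.MODIFIED
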
@@ -507,17 +604,9 @@ def _visit_object(
binding_scope, (before_value, after_value) = self._safe_access_in(
scope, binding_name, before_object, after_object
)
- if self._is_intrinsic_function_name(function_name=binding_name):
- value = self._visit_intrinsic_function(
- scope=binding_scope,
- intrinsic_function=binding_name,
- before_arguments=before_value,
- after_arguments=after_value,
- )
- else:
- value = self._visit_value(
- scope=binding_scope, before_value=before_value, after_value=after_value
- )
+ value = self._visit_value(
+ scope=binding_scope, before_value=before_value, after_value=after_value
+ )
bindings[binding_name] = value
change_type = change_type.for_child(value.change_type)
node_object = NodeObject(scope=scope, change_type=change_type, bindings=bindings)
@@ -541,8 +630,11 @@ def _visit_value(
value = self._visited_scopes.get(scope)
if isinstance(value, ChangeSetEntity):
return value
+
+ before_type_name = self._type_name_of(before_value)
+ after_type_name = self._type_name_of(after_value)
unset = object()
- if type(before_value) is type(after_value):
+ if before_type_name == after_type_name:
dominant_value = before_value
elif self._is_created(before=before_value, after=after_value):
dominant_value = after_value
@@ -551,6 +643,7 @@ def _visit_value(
else:
dominant_value = unset
if dominant_value is not unset:
+ dominant_type_name = self._type_name_of(dominant_value)
if self._is_terminal(value=dominant_value):
value = self._visit_terminal_value(
scope=scope, before_value=before_value, after_value=after_value
@@ -563,6 +656,16 @@ def _visit_value(
value = self._visit_array(
scope=scope, before_array=before_value, after_array=after_value
)
+ elif self._is_intrinsic_function_name(dominant_type_name):
+ intrinsic_function_scope, (before_arguments, after_arguments) = (
+ self._safe_access_in(scope, dominant_type_name, before_value, after_value)
+ )
+ value = self._visit_intrinsic_function(
+ scope=scope,
+ intrinsic_function=dominant_type_name,
+ before_arguments=before_arguments,
+ after_arguments=after_arguments,
+ )
else:
raise RuntimeError(f"Unsupported type {type(dominant_value)}")
# Case: type divergence.
@@ -583,28 +686,12 @@ def _visit_property(
node_property = self._visited_scopes.get(scope)
if isinstance(node_property, NodeProperty):
return node_property
-
- if self._is_created(before=before_property, after=after_property):
- node_property = NodeProperty(
- scope=scope,
- change_type=ChangeType.CREATED,
- name=property_name,
- value=TerminalValueCreated(scope=scope, value=after_property),
- )
- elif self._is_removed(before=before_property, after=after_property):
- node_property = NodeProperty(
- scope=scope,
- change_type=ChangeType.REMOVED,
- name=property_name,
- value=TerminalValueRemoved(scope=scope, value=before_property),
- )
- else:
- value = self._visit_value(
- scope=scope, before_value=before_property, after_value=after_property
- )
- node_property = NodeProperty(
- scope=scope, change_type=value.change_type, name=property_name, value=value
- )
+ value = self._visit_value(
+ scope=scope, before_value=before_property, after_value=after_property
+ )
+ node_property = NodeProperty(
+ scope=scope, change_type=value.change_type, name=property_name, value=value
+ )
self._visited_scopes[scope] = node_property
return node_property
@@ -636,6 +723,13 @@ def _visit_properties(
self._visited_scopes[scope] = node_properties
return node_properties
+ def _visit_type(self, scope: Scope, before_type: Any, after_type: Any) -> TerminalValue:
+ value = self._visit_value(scope=scope, before_value=before_type, after_value=after_type)
+ if not isinstance(value, TerminalValue):
+ # TODO: decide where template schema validation should occur.
+ raise RuntimeError()
+ return value
+
def _visit_resource(
self,
scope: Scope,
@@ -654,15 +748,22 @@ def _visit_resource(
else:
change_type = ChangeType.UNCHANGED
- # TODO: investigate behaviour with type changes, for now this is filler code.
- _, type_str = self._safe_access_in(scope, TypeKey, before_resource)
+ scope_type, (before_type, after_type) = self._safe_access_in(
+ scope, TypeKey, before_resource, after_resource
+ )
+ terminal_value_type = self._visit_type(
+ scope=scope_type, before_type=before_type, after_type=after_type
+ )
+ condition_reference = None
scope_condition, (before_condition, after_condition) = self._safe_access_in(
scope, ConditionKey, before_resource, after_resource
)
- condition_reference = self._visit_terminal_value(
- scope_condition, before_condition, after_condition
- )
+        # TODO: should condition references be resolved to the referenced condition's change_type?
+ if before_condition or after_condition:
+ condition_reference = self._visit_terminal_value(
+ scope_condition, before_condition, after_condition
+ )
scope_properties, (before_properties, after_properties) = self._safe_access_in(
scope, PropertiesKey, before_resource, after_resource
@@ -672,12 +773,15 @@ def _visit_resource(
before_properties=before_properties,
after_properties=after_properties,
)
- change_type = change_type.for_child(properties.change_type)
+ if properties.properties:
+            # Properties were defined in the before or after template, and therefore
+            # must contribute to the change type of this resource.
+ change_type = change_type.for_child(properties.change_type)
node_resource = NodeResource(
scope=scope,
change_type=change_type,
name=resource_name,
- type_=TerminalValueUnchanged(scope=scope, value=type_str),
+ type_=terminal_value_type,
condition_reference=condition_reference,
properties=properties,
)
@@ -705,6 +809,36 @@ def _visit_resources(
change_type = change_type.for_child(resource.change_type)
return NodeResources(scope=scope, change_type=change_type, resources=resources)
+ def _visit_mapping(
+ self, scope: Scope, name: str, before_mapping: Maybe[dict], after_mapping: Maybe[dict]
+ ) -> NodeMapping:
+ bindings = self._visit_object(
+ scope=scope, before_object=before_mapping, after_object=after_mapping
+ )
+ return NodeMapping(
+ scope=scope, change_type=bindings.change_type, name=name, bindings=bindings
+ )
+
+ def _visit_mappings(
+ self, scope: Scope, before_mappings: Maybe[dict], after_mappings: Maybe[dict]
+ ) -> NodeMappings:
+ change_type = ChangeType.UNCHANGED
+ mappings: list[NodeMapping] = list()
+ mapping_names = self._safe_keys_of(before_mappings, after_mappings)
+ for mapping_name in mapping_names:
+ scope_mapping, (before_mapping, after_mapping) = self._safe_access_in(
+ scope, mapping_name, before_mappings, after_mappings
+ )
+ mapping = self._visit_mapping(
+ scope=scope,
+ name=mapping_name,
+ before_mapping=before_mapping,
+ after_mapping=after_mapping,
+ )
+ mappings.append(mapping)
+ change_type = change_type.for_child(mapping.change_type)
+ return NodeMappings(scope=scope, change_type=change_type, mappings=mappings)
+
def _visit_dynamic_parameter(self, parameter_name: str) -> ChangeSetEntity:
scope = Scope("Dynamic").open_scope("Parameters")
scope_parameter, (before_parameter, after_parameter) = self._safe_access_in(
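For reference, the Mappings shape that the new _visit_mappings walks; each top-level entry becomes a NodeMapping whose bindings are visited as a plain object (hypothetical values):

before_mappings = {"RegionMap": {"us-east-1": {"Ami": "ami-123"}}}
after_mappings = {"RegionMap": {"us-east-1": {"Ami": "ami-789"}}}  # leaf edited
# _visit_mappings iterates the union of mapping names (here just "RegionMap"),
# builds a NodeMapping per name via _visit_mapping, and folds each child's change
# type into the NodeMappings node: MODIFIED here, since the Ami leaf differs.
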
@@ -725,38 +859,31 @@ def _visit_parameter(
node_parameter = self._visited_scopes.get(scope)
if isinstance(node_parameter, NodeParameter):
return node_parameter
- # TODO: add logic to compute defaults already in the graph building process?
+
+ type_scope, (before_type, after_type) = self._safe_access_in(
+ scope, TypeKey, before_parameter, after_parameter
+ )
+ type_ = self._visit_value(type_scope, before_type, after_type)
+
+ default_scope, (before_default, after_default) = self._safe_access_in(
+ scope, DefaultKey, before_parameter, after_parameter
+ )
+ default_value = self._visit_value(default_scope, before_default, after_default)
+
dynamic_value = self._visit_dynamic_parameter(parameter_name=parameter_name)
- if self._is_created(before=before_parameter, after=after_parameter):
- node_parameter = NodeParameter(
- scope=scope,
- change_type=ChangeType.CREATED,
- name=parameter_name,
- value=TerminalValueCreated(scope=scope, value=after_parameter),
- dynamic_value=dynamic_value,
- )
- elif self._is_removed(before=before_parameter, after=after_parameter):
- node_parameter = NodeParameter(
- scope=scope,
- change_type=ChangeType.REMOVED,
- name=parameter_name,
- value=TerminalValueRemoved(scope=scope, value=before_parameter),
- dynamic_value=dynamic_value,
- )
- else:
- value = self._visit_value(
- scope=scope, before_value=before_parameter, after_value=after_parameter
- )
- change_type = self._change_type_for_parent_of(
- change_types=[dynamic_value.change_type, value.change_type]
- )
- node_parameter = NodeParameter(
- scope=scope,
- change_type=change_type,
- name=parameter_name,
- value=value,
- dynamic_value=dynamic_value,
- )
+
+ change_type = self._change_type_for_parent_of(
+ change_types=[type_.change_type, default_value.change_type, dynamic_value.change_type]
+ )
+
+ node_parameter = NodeParameter(
+ scope=scope,
+ change_type=change_type,
+ name=parameter_name,
+ type_=type_,
+ default_value=default_value,
+ dynamic_value=dynamic_value,
+ )
self._visited_scopes[scope] = node_parameter
return node_parameter
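With this change a parameter's change type is derived from its Type, Default, and dynamically supplied value. A hypothetical before/after fragment:

before_parameter = {"Type": "String", "Default": "dev"}
after_parameter = {"Type": "String", "Default": "prod"}  # Default edited
# type_ is UNCHANGED and default_value is MODIFIED, so _change_type_for_parent_of
# marks the NodeParameter as MODIFIED even when no stack-level value is supplied.
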
@@ -797,18 +924,9 @@ def _visit_condition(
node_condition = self._visited_scopes.get(scope)
if isinstance(node_condition, NodeCondition):
return node_condition
-
- # TODO: is schema validation/check necessary or can we trust the input at this point?
- function_names: list[str] = self._safe_keys_of(before_condition, after_condition)
- if len(function_names) == 1:
- body = self._visit_object(
- scope=scope, before_object=before_condition, after_object=after_condition
- )
- else:
- body = self._visit_divergence(
- scope=scope, before_value=before_condition, after_value=after_condition
- )
-
+ body = self._visit_value(
+ scope=scope, before_value=before_condition, after_value=after_condition
+ )
node_condition = NodeCondition(
scope=scope, change_type=body.change_type, name=condition_name, body=body
)
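Because _visit_value now recognizes single-key intrinsic-function objects (see the _visit_value hunk above), _visit_condition no longer needs its own dispatch. A hypothetical condition body illustrating the path taken:

condition_body = {"Fn::Equals": [{"Ref": "Stage"}, "prod"]}
# _visit_value sees a single-key dict whose key is in INTRINSIC_FUNCTIONS,
# so it routes the body to _visit_intrinsic_function rather than _visit_object.
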
@@ -842,9 +960,75 @@ def _visit_conditions(
self._visited_scopes[scope] = node_conditions
return node_conditions
+ def _visit_output(
+ self, scope: Scope, name: str, before_output: Maybe[dict], after_output: Maybe[dict]
+ ) -> NodeOutput:
+ change_type = ChangeType.UNCHANGED
+ scope_value, (before_value, after_value) = self._safe_access_in(
+ scope, ValueKey, before_output, after_output
+ )
+ value = self._visit_value(scope_value, before_value, after_value)
+ change_type = change_type.for_child(value.change_type)
+
+ export: Optional[ChangeSetEntity] = None
+ scope_export, (before_export, after_export) = self._safe_access_in(
+ scope, ExportKey, before_output, after_output
+ )
+ if before_export or after_export:
+ export = self._visit_value(scope_export, before_export, after_export)
+ change_type = change_type.for_child(export.change_type)
+
+        # TODO: should condition references be resolved to the referenced condition's change_type?
+ condition_reference: Optional[TerminalValue] = None
+ scope_condition, (before_condition, after_condition) = self._safe_access_in(
+ scope, ConditionKey, before_output, after_output
+ )
+ if before_condition or after_condition:
+ condition_reference = self._visit_terminal_value(
+ scope_condition, before_condition, after_condition
+ )
+ change_type = change_type.for_child(condition_reference.change_type)
+
+ return NodeOutput(
+ scope=scope,
+ change_type=change_type,
+ name=name,
+ value=value,
+ export=export,
+            condition_reference=condition_reference,
+ )
+
+ def _visit_outputs(
+ self, scope: Scope, before_outputs: Maybe[dict], after_outputs: Maybe[dict]
+ ) -> NodeOutputs:
+ change_type = ChangeType.UNCHANGED
+ outputs: list[NodeOutput] = list()
+ output_names: list[str] = self._safe_keys_of(before_outputs, after_outputs)
+ for output_name in output_names:
+ scope_output, (before_output, after_output) = self._safe_access_in(
+ scope, output_name, before_outputs, after_outputs
+ )
+ output = self._visit_output(
+ scope=scope_output,
+ name=output_name,
+ before_output=before_output,
+ after_output=after_output,
+ )
+ outputs.append(output)
+ change_type = change_type.for_child(output.change_type)
+ return NodeOutputs(scope=scope, change_type=change_type, outputs=outputs)
+
def _model(self, before_template: Maybe[dict], after_template: Maybe[dict]) -> NodeTemplate:
root_scope = Scope()
# TODO: visit other child types
+
+ mappings_scope, (before_mappings, after_mappings) = self._safe_access_in(
+ root_scope, MappingsKey, before_template, after_template
+ )
+ mappings = self._visit_mappings(
+ scope=mappings_scope, before_mappings=before_mappings, after_mappings=after_mappings
+ )
+
parameters_scope, (before_parameters, after_parameters) = self._safe_access_in(
root_scope, ParametersKey, before_template, after_template
)
@@ -872,13 +1056,22 @@ def _model(self, before_template: Maybe[dict], after_template: Maybe[dict]) -> N
after_resources=after_resources,
)
+ outputs_scope, (before_outputs, after_outputs) = self._safe_access_in(
+ root_scope, OutputsKey, before_template, after_template
+ )
+ outputs = self._visit_outputs(
+ scope=outputs_scope, before_outputs=before_outputs, after_outputs=after_outputs
+ )
+
# TODO: compute the change_type of the template properly.
return NodeTemplate(
scope=root_scope,
change_type=resources.change_type,
+ mappings=mappings,
parameters=parameters,
conditions=conditions,
resources=resources,
+ outputs=outputs,
)
def _retrieve_condition_if_exists(self, condition_name: str) -> Optional[NodeCondition]:
@@ -919,6 +1112,23 @@ def _retrieve_parameter_if_exists(self, parameter_name: str) -> Optional[NodePar
return node_parameter
return None
+    def _retrieve_mapping(self, mapping_name: str) -> NodeMapping:
+ # TODO: add caching mechanism, and raise appropriate error if missing.
+ scope_mappings, (before_mappings, after_mappings) = self._safe_access_in(
+ Scope(), MappingsKey, self._before_template, self._after_template
+ )
+ before_mappings = before_mappings or dict()
+ after_mappings = after_mappings or dict()
+ if mapping_name in before_mappings or mapping_name in after_mappings:
+ scope_mapping, (before_mapping, after_mapping) = self._safe_access_in(
+ scope_mappings, mapping_name, before_mappings, after_mappings
+ )
+ node_mapping = self._visit_mapping(
+ scope_mapping, mapping_name, before_mapping, after_mapping
+ )
+ return node_mapping
+ raise RuntimeError()
+
def _retrieve_or_visit_resource(self, resource_name: str) -> NodeResource:
resources_scope, (before_resources, after_resources) = self._safe_access_in(
Scope(),
@@ -974,13 +1184,30 @@ def _change_type_for_parent_of(change_types: list[ChangeType]) -> ChangeType:
break
return parent_change_type
+ @staticmethod
+ def _name_if_intrinsic_function(value: Maybe[Any]) -> Optional[str]:
+ if isinstance(value, dict):
+ keys = ChangeSetModel._safe_keys_of(value)
+ if len(keys) == 1:
+ key_name = keys[0]
+ if ChangeSetModel._is_intrinsic_function_name(key_name):
+ return key_name
+ return None
+
+ @staticmethod
+ def _type_name_of(value: Maybe[Any]) -> str:
+ maybe_intrinsic_function_name = ChangeSetModel._name_if_intrinsic_function(value)
+ if maybe_intrinsic_function_name is not None:
+ return maybe_intrinsic_function_name
+ return type(value).__name__
+
@staticmethod
def _is_terminal(value: Any) -> bool:
return type(value) in {int, float, bool, str, None, NothingType}
@staticmethod
def _is_object(value: Any) -> bool:
- return isinstance(value, dict)
+ return isinstance(value, dict) and ChangeSetModel._name_if_intrinsic_function(value) is None
@staticmethod
def _is_array(value: Any) -> bool:
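Before moving to the describer: for reference, the Outputs shape consumed by the new _visit_output above. Value is required, while Export and Condition are optional and only folded into the change type when present (hypothetical fragment):

outputs = {
    "ApiUrl": {
        "Value": {"Fn::GetAtt": ["Api", "Endpoint"]},
        "Export": {"Name": "api-url"},  # optional: visited only when present
        "Condition": "IsProd",          # optional: visited as a terminal value
    }
}
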
diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py
index 9301af7729899..cf7f4330923c3 100644
--- a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py
+++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py
@@ -1,416 +1,179 @@
from __future__ import annotations
-import abc
-from typing import Any, Final, Optional
+import json
+from typing import Final, Optional
import localstack.aws.api.cloudformation as cfn_api
from localstack.services.cloudformation.engine.v2.change_set_model import (
- ChangeSetEntity,
- ChangeType,
- NodeArray,
- NodeCondition,
- NodeDivergence,
NodeIntrinsicFunction,
- NodeObject,
- NodeParameter,
- NodeProperties,
- NodeProperty,
NodeResource,
NodeTemplate,
- NothingType,
PropertiesKey,
- Scope,
- TerminalValue,
- TerminalValueCreated,
- TerminalValueModified,
- TerminalValueRemoved,
- TerminalValueUnchanged,
)
-from localstack.services.cloudformation.engine.v2.change_set_model_visitor import (
- ChangeSetModelVisitor,
+from localstack.services.cloudformation.engine.v2.change_set_model_preproc import (
+ ChangeSetModelPreproc,
+ PreprocEntityDelta,
+ PreprocProperties,
+ PreprocResource,
)
CHANGESET_KNOWN_AFTER_APPLY: Final[str] = "{{changeSet:KNOWN_AFTER_APPLY}}"
-class DescribeUnit(abc.ABC):
- before_context: Optional[Any] = None
- after_context: Optional[Any] = None
-
- def __init__(self, before_context: Optional[Any] = None, after_context: Optional[Any] = None):
- self.before_context = before_context
- self.after_context = after_context
-
-
-class ChangeSetModelDescriber(ChangeSetModelVisitor):
- _node_template: Final[NodeTemplate]
+class ChangeSetModelDescriber(ChangeSetModelPreproc):
+ _include_property_values: Final[bool]
_changes: Final[cfn_api.Changes]
- _describe_unit_cache: dict[Scope, DescribeUnit]
- def __init__(self, node_template: NodeTemplate):
- self._node_template = node_template
+ def __init__(
+ self,
+ node_template: NodeTemplate,
+ before_resolved_resources: dict,
+ include_property_values: bool,
+ ):
+ super().__init__(
+ node_template=node_template, before_resolved_resources=before_resolved_resources
+ )
+ self._include_property_values = include_property_values
self._changes = list()
- self._describe_unit_cache = dict()
- self.visit(self._node_template)
def get_changes(self) -> cfn_api.Changes:
+ self._changes.clear()
+ self.process()
return self._changes
- @staticmethod
- def _get_node_resource_for(resource_name: str, node_template: NodeTemplate) -> NodeResource:
- # TODO: this could be improved with hashmap lookups if the Node contained bindings and not lists.
- for node_resource in node_template.resources.resources:
- if node_resource.name == resource_name:
- return node_resource
- # TODO
- raise RuntimeError()
-
- @staticmethod
- def _get_node_property_for(property_name: str, node_resource: NodeResource) -> NodeProperty:
- # TODO: this could be improved with hashmap lookups if the Node contained bindings and not lists.
- for node_property in node_resource.properties.properties:
- if node_property.name == property_name:
- return node_property
- # TODO
- raise RuntimeError()
-
- def _get_node_parameter_if_exists(self, parameter_name: str) -> Optional[NodeParameter]:
- parameters: list[NodeParameter] = self._node_template.parameters.parameters
- # TODO: another scenarios suggesting property lookups might be preferable.
- for parameter in parameters:
- if parameter.name == parameter_name:
- return parameter
- return None
-
- def _get_node_condition_if_exists(self, condition_name: str) -> Optional[NodeCondition]:
- conditions: list[NodeCondition] = self._node_template.conditions.conditions
- # TODO: another scenarios suggesting property lookups might be preferable.
- for condition in conditions:
- if condition.name == condition_name:
- return condition
- return None
-
- def _resolve_reference(self, logica_id: str) -> DescribeUnit:
- node_condition = self._get_node_condition_if_exists(condition_name=logica_id)
- if isinstance(node_condition, NodeCondition):
- condition_unit = self.visit(node_condition)
- return condition_unit
-
- node_parameter = self._get_node_parameter_if_exists(parameter_name=logica_id)
- if isinstance(node_parameter, NodeParameter):
- parameter_unit = self.visit(node_parameter)
- return parameter_unit
-
- # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
- # node_resource = self._get_node_resource_for(
- # resource_name=logica_id, node_template=self._node_template
- # )
- limitation_str = "Cannot yet compute Ref values for Resources"
- resource_unit = DescribeUnit(before_context=limitation_str, after_context=limitation_str)
- return resource_unit
-
- def _resolve_reference_binding(
- self, before_logical_id: str, after_logical_id: str
- ) -> DescribeUnit:
- before_unit = self._resolve_reference(logica_id=before_logical_id)
- after_unit = self._resolve_reference(logica_id=after_logical_id)
- return DescribeUnit(
- before_context=before_unit.before_context, after_context=after_unit.after_context
- )
-
- def visit(self, change_set_entity: ChangeSetEntity) -> DescribeUnit:
- describe_unit = self._describe_unit_cache.get(change_set_entity.scope)
- if describe_unit is not None:
- return describe_unit
- describe_unit = super().visit(change_set_entity=change_set_entity)
- self._describe_unit_cache[change_set_entity.scope] = describe_unit
- return describe_unit
-
- def visit_terminal_value_modified(
- self, terminal_value_modified: TerminalValueModified
- ) -> DescribeUnit:
- return DescribeUnit(
- before_context=terminal_value_modified.value,
- after_context=terminal_value_modified.modified_value,
- )
-
- def visit_terminal_value_created(
- self, terminal_value_created: TerminalValueCreated
- ) -> DescribeUnit:
- return DescribeUnit(after_context=terminal_value_created.value)
-
- def visit_terminal_value_removed(
- self, terminal_value_removed: TerminalValueRemoved
- ) -> DescribeUnit:
- return DescribeUnit(before_context=terminal_value_removed.value)
-
- def visit_terminal_value_unchanged(
- self, terminal_value_unchanged: TerminalValueUnchanged
- ) -> DescribeUnit:
- return DescribeUnit(
- before_context=terminal_value_unchanged.value,
- after_context=terminal_value_unchanged.value,
- )
-
- def visit_node_divergence(self, node_divergence: NodeDivergence) -> DescribeUnit:
- before_unit = self.visit(node_divergence.value)
- after_unit = self.visit(node_divergence.divergence)
- return DescribeUnit(
- before_context=before_unit.before_context, after_context=after_unit.after_context
- )
-
- def visit_node_object(self, node_object: NodeObject) -> DescribeUnit:
- # TODO: improve check syntax
- if len(node_object.bindings) == 1:
- binding_values = list(node_object.bindings.values())
- unique_value = binding_values[0]
- if isinstance(unique_value, NodeIntrinsicFunction):
- return self.visit(unique_value)
-
- before_context = dict()
- after_context = dict()
- for name, change_set_entity in node_object.bindings.items():
- describe_unit: DescribeUnit = self.visit(change_set_entity=change_set_entity)
- match change_set_entity.change_type:
- case ChangeType.MODIFIED:
- before_context[name] = describe_unit.before_context
- after_context[name] = describe_unit.after_context
- case ChangeType.CREATED:
- after_context[name] = describe_unit.after_context
- case ChangeType.REMOVED:
- before_context[name] = describe_unit.before_context
- case ChangeType.UNCHANGED:
- before_context[name] = describe_unit.before_context
- after_context[name] = describe_unit.before_context
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
def visit_node_intrinsic_function_fn_get_att(
self, node_intrinsic_function: NodeIntrinsicFunction
- ) -> DescribeUnit:
- arguments_unit = self.visit(node_intrinsic_function.arguments)
- # TODO: validate the return value according to the spec.
- before_argument_list = arguments_unit.before_context
- before_logical_name_of_resource = before_argument_list[0]
- before_attribute_name = before_argument_list[1]
- before_node_resource = self._get_node_resource_for(
- resource_name=before_logical_name_of_resource, node_template=self._node_template
- )
- node_property: TerminalValue = self._get_node_property_for(
- property_name=before_attribute_name, node_resource=before_node_resource
- )
-
- before_context = node_property.value.value
- if node_property.change_type != ChangeType.UNCHANGED:
- after_context = CHANGESET_KNOWN_AFTER_APPLY
- else:
- after_context = node_property.value.value
-
- match node_intrinsic_function.change_type:
- case ChangeType.MODIFIED:
- return DescribeUnit(before_context=before_context, after_context=after_context)
- case ChangeType.CREATED:
- return DescribeUnit(after_context=after_context)
- case ChangeType.REMOVED:
- return DescribeUnit(before_context=before_context)
- # Unchanged
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_intrinsic_function_fn_equals(
- self, node_intrinsic_function: NodeIntrinsicFunction
- ) -> DescribeUnit:
- # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
- arguments_unit = self.visit(node_intrinsic_function.arguments)
- before_values = arguments_unit.before_context
- after_values = arguments_unit.after_context
- before_context = None
- if before_values:
- before_context = before_values[0] == before_values[1]
- after_context = None
- if after_values:
- after_context = after_values[0] == after_values[1]
- match node_intrinsic_function.change_type:
- case ChangeType.MODIFIED:
- return DescribeUnit(before_context=before_context, after_context=after_context)
- case ChangeType.CREATED:
- return DescribeUnit(after_context=after_context)
- case ChangeType.REMOVED:
- return DescribeUnit(before_context=before_context)
- # Unchanged
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_intrinsic_function_fn_if(
- self, node_intrinsic_function: NodeIntrinsicFunction
- ) -> DescribeUnit:
- # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
- arguments_unit = self.visit(node_intrinsic_function.arguments)
-
- def _compute_unit_for_if_statement(args: list[Any]) -> DescribeUnit:
- condition_name = args[0]
- boolean_expression_unit = self._resolve_reference(logica_id=condition_name)
- return DescribeUnit(
- before_context=args[1] if boolean_expression_unit.before_context else args[2],
- after_context=args[1] if boolean_expression_unit.after_context else args[2],
+ ) -> PreprocEntityDelta:
+ # TODO: If we can properly compute the before and after value, why should we
+ # artificially limit the precision of our output to match AWS's?
+
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+ before_argument_list = arguments_delta.before
+ after_argument_list = arguments_delta.after
+
+ before = None
+ if before_argument_list:
+ before_logical_name_of_resource = before_argument_list[0]
+ before_attribute_name = before_argument_list[1]
+ before_node_resource = self._get_node_resource_for(
+ resource_name=before_logical_name_of_resource, node_template=self._node_template
)
-
- # TODO: add support for this being created or removed.
- before_outcome_unit = _compute_unit_for_if_statement(arguments_unit.before_context)
- before_context = before_outcome_unit.before_context
- after_outcome_unit = _compute_unit_for_if_statement(arguments_unit.after_context)
- after_context = after_outcome_unit.after_context
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_intrinsic_function_fn_not(
- self, node_intrinsic_function: NodeIntrinsicFunction
- ) -> DescribeUnit:
- # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
- # TODO: add type checking/validation for result unit?
- arguments_unit = self.visit(node_intrinsic_function.arguments)
- before_condition = arguments_unit.before_context
- after_condition = arguments_unit.after_context
- if before_condition:
- before_condition_outcome = before_condition[0]
- before_context = not before_condition_outcome
- else:
- before_context = None
-
- if after_condition:
- after_condition_outcome = after_condition[0]
- after_context = not after_condition_outcome
- else:
- after_context = None
- # Implicit change type computation.
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_parameter(self, node_parameter: NodeParameter) -> DescribeUnit:
- # TODO: add caching for these operation, parameters may be referenced more than once.
- # TODO: add support for default value sampling
- dynamic_value = node_parameter.dynamic_value
- describe_unit = self.visit(dynamic_value)
- return describe_unit
-
- def visit_node_condition(self, node_condition: NodeCondition) -> DescribeUnit:
- describe_unit = self.visit(node_condition.body)
- return describe_unit
-
- def visit_node_intrinsic_function_ref(
- self, node_intrinsic_function: NodeIntrinsicFunction
- ) -> DescribeUnit:
- arguments_unit = self.visit(node_intrinsic_function.arguments)
-
- # TODO: add tests with created and deleted parameters and verify this logic holds.
- before_logical_id = arguments_unit.before_context
- before_unit = self._resolve_reference(logica_id=before_logical_id)
- before_context = before_unit.before_context
-
- after_logical_id = arguments_unit.after_context
- after_unit = self._resolve_reference(logica_id=after_logical_id)
- after_context = after_unit.after_context
-
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_array(self, node_array: NodeArray) -> DescribeUnit:
- before_context = list()
- after_context = list()
- for change_set_entity in node_array.array:
- describe_unit: DescribeUnit = self.visit(change_set_entity=change_set_entity)
- match change_set_entity.change_type:
- case ChangeType.MODIFIED:
- before_context.append(describe_unit.before_context)
- after_context.append(describe_unit.after_context)
- case ChangeType.CREATED:
- after_context.append(describe_unit.after_context)
- case ChangeType.REMOVED:
- before_context.append(describe_unit.before_context)
- case ChangeType.UNCHANGED:
- before_context.append(describe_unit.before_context)
- after_context.append(describe_unit.before_context)
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_properties(self, node_properties: NodeProperties) -> DescribeUnit:
- before_context: dict[str, Any] = dict()
- after_context: dict[str, Any] = dict()
- for node_property in node_properties.properties:
- describe_unit = self.visit(node_property.value)
- property_name = node_property.name
- match node_property.change_type:
- case ChangeType.MODIFIED:
- before_context[property_name] = describe_unit.before_context
- after_context[property_name] = describe_unit.after_context
- case ChangeType.CREATED:
- after_context[property_name] = describe_unit.after_context
- case ChangeType.REMOVED:
- before_context[property_name] = describe_unit.before_context
- case ChangeType.UNCHANGED:
- before_context[property_name] = describe_unit.before_context
- after_context[property_name] = describe_unit.before_context
- # TODO: this object can probably be well-typed instead of a free dict(?)
- before_context = {PropertiesKey: before_context}
- after_context = {PropertiesKey: after_context}
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def _resolve_resource_condition_reference(self, reference: TerminalValue) -> DescribeUnit:
- reference_unit = self.visit(reference)
- before_reference = reference_unit.before_context
- after_reference = reference_unit.after_context
- condition_unit = self._resolve_reference_binding(
- before_logical_id=before_reference, after_logical_id=after_reference
- )
- before_context = (
- condition_unit.before_context if not isinstance(before_reference, NothingType) else True
- )
- after_context = (
- condition_unit.after_context if not isinstance(after_reference, NothingType) else True
- )
- return DescribeUnit(before_context=before_context, after_context=after_context)
-
- def visit_node_resource(self, node_resource: NodeResource) -> DescribeUnit:
- condition_unit = self._resolve_resource_condition_reference(
- node_resource.condition_reference
- )
- condition_before = condition_unit.before_context
- condition_after = condition_unit.after_context
- if not condition_before and condition_after:
- change_type = ChangeType.CREATED
- elif condition_before and not condition_after:
- change_type = ChangeType.REMOVED
- else:
- change_type = node_resource.change_type
- if change_type == ChangeType.UNCHANGED:
- # TODO
- return None
+ before_node_property = self._get_node_property_for(
+ property_name=before_attribute_name, node_resource=before_node_resource
+ )
+ before_property_delta = self.visit(before_node_property)
+ before = before_property_delta.before
+
+ after = None
+ if after_argument_list:
+ after_logical_name_of_resource = after_argument_list[0]
+ after_attribute_name = after_argument_list[1]
+ after_node_resource = self._get_node_resource_for(
+ resource_name=after_logical_name_of_resource, node_template=self._node_template
+ )
+ after_node_property = self._get_node_property_for(
+ property_name=after_attribute_name, node_resource=after_node_resource
+ )
+ after_property_delta = self.visit(after_node_property)
+ if after_property_delta.before == after_property_delta.after:
+ after = after_property_delta.after
+ else:
+ after = CHANGESET_KNOWN_AFTER_APPLY
+
+ return PreprocEntityDelta(before=before, after=after)
+
+ def _register_resource_change(
+ self,
+ logical_id: str,
+ type_: str,
+ physical_id: Optional[str],
+ before_properties: Optional[PreprocProperties],
+ after_properties: Optional[PreprocProperties],
+ ) -> None:
+ action = cfn_api.ChangeAction.Modify
+ if before_properties is None:
+ action = cfn_api.ChangeAction.Add
+ elif after_properties is None:
+ action = cfn_api.ChangeAction.Remove
resource_change = cfn_api.ResourceChange()
- resource_change["LogicalResourceId"] = node_resource.name
-
- # TODO: investigate effects on type changes
- type_describe_unit = self.visit(node_resource.type_)
- resource_change["ResourceType"] = (
- type_describe_unit.before_context or type_describe_unit.after_context
- )
-
- properties_describe_unit = self.visit(node_resource.properties)
- match change_type:
- case ChangeType.MODIFIED:
- resource_change["Action"] = cfn_api.ChangeAction.Modify
- resource_change["BeforeContext"] = properties_describe_unit.before_context
- resource_change["AfterContext"] = properties_describe_unit.after_context
- case ChangeType.CREATED:
- resource_change["Action"] = cfn_api.ChangeAction.Add
- resource_change["AfterContext"] = properties_describe_unit.after_context
- case ChangeType.REMOVED:
- resource_change["Action"] = cfn_api.ChangeAction.Remove
- resource_change["BeforeContext"] = properties_describe_unit.before_context
-
+ resource_change["Action"] = action
+ resource_change["LogicalResourceId"] = logical_id
+ resource_change["ResourceType"] = type_
+ if physical_id:
+ resource_change["PhysicalResourceId"] = physical_id
+ if self._include_property_values and before_properties is not None:
+ before_context_properties = {PropertiesKey: before_properties.properties}
+ before_context_properties_json_str = json.dumps(before_context_properties)
+ resource_change["BeforeContext"] = before_context_properties_json_str
+ if self._include_property_values and after_properties is not None:
+ after_context_properties = {PropertiesKey: after_properties.properties}
+ after_context_properties_json_str = json.dumps(after_context_properties)
+ resource_change["AfterContext"] = after_context_properties_json_str
self._changes.append(
cfn_api.Change(Type=cfn_api.ChangeType.Resource, ResourceChange=resource_change)
)
- # TODO
- return None
+ def _describe_resource_change(
+ self, name: str, before: Optional[PreprocResource], after: Optional[PreprocResource]
+ ) -> None:
+ if before == after:
+ # unchanged: nothing to do.
+ return
+ if before is not None and after is not None:
+ # Case: change on same type.
+ if before.resource_type == after.resource_type:
+ # Register a Modified if changed.
+ self._register_resource_change(
+ logical_id=name,
+ physical_id=before.physical_resource_id,
+ type_=before.resource_type,
+ before_properties=before.properties,
+ after_properties=after.properties,
+ )
+ # Case: type migration.
+ # TODO: Add test to assert that on type change the resources are replaced.
+ else:
+ # Register a Removed for the previous type.
+ self._register_resource_change(
+ logical_id=name,
+ physical_id=before.physical_resource_id,
+ type_=before.resource_type,
+ before_properties=before.properties,
+ after_properties=None,
+ )
+ # Register a Create for the next type.
+ self._register_resource_change(
+ logical_id=name,
+ physical_id=None,
+ type_=after.resource_type,
+ before_properties=None,
+ after_properties=after.properties,
+ )
+ elif before is not None:
+ # Case: removal
+ self._register_resource_change(
+ logical_id=name,
+ physical_id=before.physical_resource_id,
+ type_=before.resource_type,
+ before_properties=before.properties,
+ after_properties=None,
+ )
+ elif after is not None:
+ # Case: addition
+ self._register_resource_change(
+ logical_id=name,
+ physical_id=None,
+ type_=after.resource_type,
+ before_properties=None,
+ after_properties=after.properties,
+ )
- # def visit_node_resources(self, node_resources: NodeResources) -> DescribeUnit:
- # for node_resource in node_resources.resources:
- # if node_resource.change_type != ChangeType.UNCHANGED:
- # self.visit_node_resource(node_resource=node_resource)
- # # TODO
- # return None
+ def visit_node_resource(
+ self, node_resource: NodeResource
+ ) -> PreprocEntityDelta[PreprocResource, PreprocResource]:
+ delta = super().visit_node_resource(node_resource=node_resource)
+ self._describe_resource_change(
+ name=node_resource.name, before=delta.before, after=delta.after
+ )
+ return delta
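The describer now emits one Change entry per _register_resource_change call, and a type migration yields a Remove followed by an Add. A sketch of the resulting entries when include_property_values is true (illustrative values, property JSON abridged):

changes = [
    {"Type": "Resource", "ResourceChange": {
        "Action": "Remove",
        "LogicalResourceId": "Topic",
        "ResourceType": "AWS::SNS::Topic",
        "PhysicalResourceId": "arn:aws:sns:...:Topic",
        "BeforeContext": '{"Properties": {"TopicName": "topic"}}',
    }},
    {"Type": "Resource", "ResourceChange": {
        "Action": "Add",
        "LogicalResourceId": "Topic",
        "ResourceType": "AWS::SQS::Queue",
        "AfterContext": '{"Properties": {"QueueName": "queue"}}',
    }},
]
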
diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_executor.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_executor.py
new file mode 100644
index 0000000000000..4ce4c2fad2db1
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_executor.py
@@ -0,0 +1,312 @@
+import copy
+import logging
+import uuid
+from dataclasses import dataclass
+from typing import Final, Optional
+
+from localstack.aws.api.cloudformation import ChangeAction, StackStatus
+from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY
+from localstack.services.cloudformation.engine.v2.change_set_model import (
+ NodeOutput,
+ NodeParameter,
+ NodeResource,
+)
+from localstack.services.cloudformation.engine.v2.change_set_model_preproc import (
+ ChangeSetModelPreproc,
+ PreprocEntityDelta,
+ PreprocOutput,
+ PreprocProperties,
+ PreprocResource,
+)
+from localstack.services.cloudformation.resource_provider import (
+ Credentials,
+ OperationStatus,
+ ProgressEvent,
+ ResourceProviderExecutor,
+ ResourceProviderPayload,
+)
+from localstack.services.cloudformation.v2.entities import ChangeSet
+
+LOG = logging.getLogger(__name__)
+
+
+@dataclass
+class ChangeSetModelExecutorResult:
+ resources: dict
+ parameters: dict
+ outputs: dict
+
+
+class ChangeSetModelExecutor(ChangeSetModelPreproc):
+ _change_set: Final[ChangeSet]
+ # TODO: add typing for resolved resources and parameters.
+ resources: Final[dict]
+ outputs: Final[dict]
+ resolved_parameters: Final[dict]
+
+ def __init__(self, change_set: ChangeSet):
+ super().__init__(
+ node_template=change_set.update_graph,
+ before_resolved_resources=change_set.stack.resolved_resources,
+ )
+ self._change_set = change_set
+ self.resources = dict()
+ self.outputs = dict()
+ self.resolved_parameters = dict()
+
+ def execute(self) -> ChangeSetModelExecutorResult:
+ self.process()
+ return ChangeSetModelExecutorResult(
+ resources=self.resources, parameters=self.resolved_parameters, outputs=self.outputs
+ )
+
+ def visit_node_parameter(self, node_parameter: NodeParameter) -> PreprocEntityDelta:
+ delta = super().visit_node_parameter(node_parameter=node_parameter)
+ self.resolved_parameters[node_parameter.name] = delta.after
+ return delta
+
+ def _after_resource_physical_id(self, resource_logical_id: str) -> Optional[str]:
+ after_resolved_resources = self.resources
+ return self._resource_physical_resource_id_from(
+ logical_resource_id=resource_logical_id, resolved_resources=after_resolved_resources
+ )
+
+ def visit_node_resource(
+ self, node_resource: NodeResource
+ ) -> PreprocEntityDelta[PreprocResource, PreprocResource]:
+ """
+ Overrides the default preprocessing for NodeResource objects by annotating the
+ `after` delta with the physical resource ID, if side effects resulted in an update.
+ """
+ delta = super().visit_node_resource(node_resource=node_resource)
+ self._execute_on_resource_change(
+ name=node_resource.name, before=delta.before, after=delta.after
+ )
+ after_resource = delta.after
+ if after_resource is not None and delta.before != delta.after:
+ after_logical_id = after_resource.logical_id
+ after_physical_id: Optional[str] = self._after_resource_physical_id(
+ resource_logical_id=after_logical_id
+ )
+ if after_physical_id is None:
+ raise RuntimeError(
+                    f"No PhysicalResourceId was found for resource '{after_logical_id}' post-update."
+ )
+ after_resource.physical_resource_id = after_physical_id
+ return delta
+
+ def visit_node_output(
+ self, node_output: NodeOutput
+ ) -> PreprocEntityDelta[PreprocOutput, PreprocOutput]:
+ delta = super().visit_node_output(node_output=node_output)
+ if delta.after is None:
+            # handling a deletion, so the output value no longer matters
+ # TODO: are there other situations?
+ return delta
+
+ self.outputs[delta.after.name] = delta.after.value
+ return delta
+
+ def _execute_on_resource_change(
+ self, name: str, before: Optional[PreprocResource], after: Optional[PreprocResource]
+ ) -> None:
+ if before == after:
+ # unchanged: nothing to do.
+ return
+ # TODO: this logic is a POC and should be revised.
+ if before is not None and after is not None:
+ # Case: change on same type.
+ if before.resource_type == after.resource_type:
+ # Register a Modified if changed.
+                # XXX hacky, stick the previous resource's properties into the payload
+ before_properties = self._merge_before_properties(name, before)
+
+ self._execute_resource_action(
+ action=ChangeAction.Modify,
+ logical_resource_id=name,
+ resource_type=before.resource_type,
+ before_properties=before_properties,
+ after_properties=after.properties,
+ )
+ # Case: type migration.
+ # TODO: Add test to assert that on type change the resources are replaced.
+ else:
+                # XXX hacky, stick the previous resource's properties into the payload
+ before_properties = self._merge_before_properties(name, before)
+ # Register a Removed for the previous type.
+ self._execute_resource_action(
+ action=ChangeAction.Remove,
+ logical_resource_id=name,
+ resource_type=before.resource_type,
+ before_properties=before_properties,
+ after_properties=None,
+ )
+ # Register a Create for the next type.
+ self._execute_resource_action(
+ action=ChangeAction.Add,
+ logical_resource_id=name,
+ resource_type=after.resource_type,
+ before_properties=None,
+ after_properties=after.properties,
+ )
+ elif before is not None:
+ # Case: removal
+            # XXX hacky, stick the previous resource's properties into the payload
+ before_properties = self._merge_before_properties(name, before)
+
+ self._execute_resource_action(
+ action=ChangeAction.Remove,
+ logical_resource_id=name,
+ resource_type=before.resource_type,
+ before_properties=before_properties,
+ after_properties=None,
+ )
+ elif after is not None:
+ # Case: addition
+ self._execute_resource_action(
+ action=ChangeAction.Add,
+ logical_resource_id=name,
+ resource_type=after.resource_type,
+ before_properties=None,
+ after_properties=after.properties,
+ )
+
+ def _merge_before_properties(
+ self, name: str, preproc_resource: PreprocResource
+ ) -> PreprocProperties:
+ if previous_resource_properties := self._change_set.stack.resolved_resources.get(
+ name, {}
+ ).get("Properties"):
+ return PreprocProperties(properties=previous_resource_properties)
+
+ # XXX fall back to returning the input value
+ return copy.deepcopy(preproc_resource.properties)
+
+ def _execute_resource_action(
+ self,
+ action: ChangeAction,
+ logical_resource_id: str,
+ resource_type: str,
+ before_properties: Optional[PreprocProperties],
+ after_properties: Optional[PreprocProperties],
+ ) -> None:
+ LOG.debug("Executing resource action: %s for resource '%s'", action, logical_resource_id)
+ resource_provider_executor = ResourceProviderExecutor(
+ stack_name=self._change_set.stack.stack_name, stack_id=self._change_set.stack.stack_id
+ )
+ payload = self.create_resource_provider_payload(
+ action=action,
+ logical_resource_id=logical_resource_id,
+ resource_type=resource_type,
+ before_properties=before_properties,
+ after_properties=after_properties,
+ )
+ resource_provider = resource_provider_executor.try_load_resource_provider(resource_type)
+
+ extra_resource_properties = {}
+ if resource_provider is not None:
+ # TODO: stack events
+ try:
+ event = resource_provider_executor.deploy_loop(
+ resource_provider, extra_resource_properties, payload
+ )
+ except Exception as e:
+ reason = str(e)
+ LOG.warning(
+ "Resource provider operation failed: '%s'",
+ reason,
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
+ )
+ stack = self._change_set.stack
+ stack_status = stack.status
+ if stack_status == StackStatus.CREATE_IN_PROGRESS:
+ stack.set_stack_status(StackStatus.CREATE_FAILED, reason=reason)
+ elif stack_status == StackStatus.UPDATE_IN_PROGRESS:
+ stack.set_stack_status(StackStatus.UPDATE_FAILED, reason=reason)
+ return
+ else:
+ event = ProgressEvent(OperationStatus.SUCCESS, resource_model={})
+
+ self.resources.setdefault(logical_resource_id, {"Properties": {}})
+ match event.status:
+ case OperationStatus.SUCCESS:
+ # merge the resources state with the external state
+ # TODO: this is likely a duplicate of updating from extra_resource_properties
+ self.resources[logical_resource_id]["Properties"].update(event.resource_model)
+ self.resources[logical_resource_id].update(extra_resource_properties)
+ # XXX for legacy delete_stack compatibility
+ self.resources[logical_resource_id]["LogicalResourceId"] = logical_resource_id
+ self.resources[logical_resource_id]["Type"] = resource_type
+ case OperationStatus.FAILED:
+ reason = event.message
+ LOG.warning(
+ "Resource provider operation failed: '%s'",
+ reason,
+ )
+ # TODO: duplication
+ stack = self._change_set.stack
+ stack_status = stack.status
+ if stack_status == StackStatus.CREATE_IN_PROGRESS:
+ stack.set_stack_status(StackStatus.CREATE_FAILED, reason=reason)
+ elif stack_status == StackStatus.UPDATE_IN_PROGRESS:
+ stack.set_stack_status(StackStatus.UPDATE_FAILED, reason=reason)
+ else:
+ raise NotImplementedError(f"Unhandled stack status: '{stack.status}'")
+            case other:
+                raise NotImplementedError(f"Event status '{other}' not handled")
+
+ def create_resource_provider_payload(
+ self,
+ action: ChangeAction,
+ logical_resource_id: str,
+ resource_type: str,
+ before_properties: Optional[PreprocProperties],
+ after_properties: Optional[PreprocProperties],
+ ) -> Optional[ResourceProviderPayload]:
+ # FIXME: use proper credentials
+ creds: Credentials = {
+ "accessKeyId": self._change_set.stack.account_id,
+ "secretAccessKey": INTERNAL_AWS_SECRET_ACCESS_KEY,
+ "sessionToken": "",
+ }
+ before_properties_value = before_properties.properties if before_properties else None
+ after_properties_value = after_properties.properties if after_properties else None
+
+ match action:
+ case ChangeAction.Add:
+ resource_properties = after_properties_value or {}
+ previous_resource_properties = None
+ case ChangeAction.Modify | ChangeAction.Dynamic:
+ resource_properties = after_properties_value or {}
+ previous_resource_properties = before_properties_value or {}
+ case ChangeAction.Remove:
+ resource_properties = before_properties_value or {}
+ previous_resource_properties = None
+ case _:
+ raise NotImplementedError(f"Action '{action}' not handled")
+
+ resource_provider_payload: ResourceProviderPayload = {
+ "awsAccountId": self._change_set.stack.account_id,
+ "callbackContext": {},
+ "stackId": self._change_set.stack.stack_name,
+ "resourceType": resource_type,
+ "resourceTypeVersion": "000000",
+ # TODO: not actually a UUID
+ "bearerToken": str(uuid.uuid4()),
+ "region": self._change_set.stack.region_name,
+ "action": str(action),
+ "requestData": {
+ "logicalResourceId": logical_resource_id,
+ "resourceProperties": resource_properties,
+ "previousResourceProperties": previous_resource_properties,
+ "callerCredentials": creds,
+ "providerCredentials": creds,
+ "systemTags": {},
+ "previousSystemTags": {},
+ "stackTags": {},
+ "previousStackTags": {},
+ },
+ }
+ return resource_provider_payload
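A minimal usage sketch of the new executor, assuming change_set is a populated ChangeSet whose update_graph has already been built:

from localstack.services.cloudformation.engine.v2.change_set_model_executor import (
    ChangeSetModelExecutor,
)

executor = ChangeSetModelExecutor(change_set)  # change_set: a populated ChangeSet
result = executor.execute()                    # runs the resource providers
resources = result.resources                   # resolved resources by logical id
outputs = result.outputs                       # stack outputs by name
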
diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_preproc.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_preproc.py
new file mode 100644
index 0000000000000..08382da63faf2
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_preproc.py
@@ -0,0 +1,611 @@
+from __future__ import annotations
+
+from typing import Any, Final, Generic, Optional, TypeVar
+
+from localstack.services.cloudformation.engine.v2.change_set_model import (
+ ChangeSetEntity,
+ ChangeType,
+ NodeArray,
+ NodeCondition,
+ NodeDivergence,
+ NodeIntrinsicFunction,
+ NodeMapping,
+ NodeObject,
+ NodeOutput,
+ NodeOutputs,
+ NodeParameter,
+ NodeProperties,
+ NodeProperty,
+ NodeResource,
+ NodeTemplate,
+ NothingType,
+ Scope,
+ TerminalValue,
+ TerminalValueCreated,
+ TerminalValueModified,
+ TerminalValueRemoved,
+ TerminalValueUnchanged,
+)
+from localstack.services.cloudformation.engine.v2.change_set_model_visitor import (
+ ChangeSetModelVisitor,
+)
+
+TBefore = TypeVar("TBefore")
+TAfter = TypeVar("TAfter")
+
+
+class PreprocEntityDelta(Generic[TBefore, TAfter]):
+ before: Optional[TBefore]
+ after: Optional[TAfter]
+
+ def __init__(self, before: Optional[TBefore] = None, after: Optional[TAfter] = None):
+ self.before = before
+ self.after = after
+
+ def __eq__(self, other):
+ if not isinstance(other, PreprocEntityDelta):
+ return False
+ return self.before == other.before and self.after == other.after
+
+
+class PreprocProperties:
+ properties: dict[str, Any]
+
+ def __init__(self, properties: dict[str, Any]):
+ self.properties = properties
+
+ def __eq__(self, other):
+ if not isinstance(other, PreprocProperties):
+ return False
+ return self.properties == other.properties
+
+
+class PreprocResource:
+ logical_id: str
+ physical_resource_id: Optional[str]
+ condition: Optional[bool]
+ resource_type: str
+ properties: PreprocProperties
+
+ def __init__(
+ self,
+ logical_id: str,
+        physical_resource_id: Optional[str],
+ condition: Optional[bool],
+ resource_type: str,
+ properties: PreprocProperties,
+ ):
+ self.logical_id = logical_id
+ self.physical_resource_id = physical_resource_id
+ self.condition = condition
+ self.resource_type = resource_type
+ self.properties = properties
+
+ @staticmethod
+    def _compare_conditions(c1: Optional[bool], c2: Optional[bool]) -> bool:
+        # The lack of a condition equates to a true condition.
+ c1 = c1 if isinstance(c1, bool) else True
+ c2 = c2 if isinstance(c2, bool) else True
+ return c1 == c2
+
+ def __eq__(self, other):
+ if not isinstance(other, PreprocResource):
+ return False
+ return all(
+ [
+ self.logical_id == other.logical_id,
+ self._compare_conditions(self.condition, other.condition),
+ self.resource_type == other.resource_type,
+ self.properties == other.properties,
+ ]
+ )
+
+
+class PreprocOutput:
+ name: str
+ value: Any
+ export: Optional[Any]
+ condition: Optional[bool]
+
+ def __init__(self, name: str, value: Any, export: Optional[Any], condition: Optional[bool]):
+ self.name = name
+ self.value = value
+ self.export = export
+ self.condition = condition
+
+ def __eq__(self, other):
+ if not isinstance(other, PreprocOutput):
+ return False
+ return all(
+ [
+ self.name == other.name,
+ self.value == other.value,
+ self.export == other.export,
+ self.condition == other.condition,
+ ]
+ )
+
+
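An illustrative sketch of the delta semantics defined above: deltas compare element-wise, and for resources the absence of a condition is treated as true, so adding an always-true condition does not register as a change:

unchanged = PreprocEntityDelta(before="ami-123", after="ami-123")
modified = PreprocEntityDelta(before="ami-123", after="ami-789")
assert unchanged == PreprocEntityDelta(before="ami-123", after="ami-123")
assert unchanged != modified
# For resources, PreprocResource._compare_conditions(None, True) is True:
# the absence of a condition equates to a true condition.
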
+class ChangeSetModelPreproc(ChangeSetModelVisitor):
+ _node_template: Final[NodeTemplate]
+ _before_resolved_resources: Final[dict]
+ _processed: dict[Scope, Any]
+
+ def __init__(self, node_template: NodeTemplate, before_resolved_resources: dict):
+ self._node_template = node_template
+ self._before_resolved_resources = before_resolved_resources
+ self._processed = dict()
+
+ def process(self) -> None:
+ self._processed.clear()
+ self.visit(self._node_template)
+
+ def _get_node_resource_for(
+ self, resource_name: str, node_template: NodeTemplate
+ ) -> NodeResource:
+ # TODO: this could be improved with hashmap lookups if the Node contained bindings and not lists.
+ for node_resource in node_template.resources.resources:
+ if node_resource.name == resource_name:
+ return node_resource
+ # TODO
+ raise RuntimeError()
+
+ def _get_node_property_for(
+ self, property_name: str, node_resource: NodeResource
+ ) -> NodeProperty:
+ # TODO: this could be improved with hashmap lookups if the Node contained bindings and not lists.
+ for node_property in node_resource.properties.properties:
+ if node_property.name == property_name:
+ return node_property
+ # TODO
+ raise RuntimeError()
+
+ def _get_node_mapping(self, map_name: str) -> NodeMapping:
+ mappings: list[NodeMapping] = self._node_template.mappings.mappings
+        # TODO: another scenario suggesting that property lookups might be preferable.
+ for mapping in mappings:
+ if mapping.name == map_name:
+ return mapping
+ # TODO
+ raise RuntimeError()
+
+ def _get_node_parameter_if_exists(self, parameter_name: str) -> Optional[NodeParameter]:
+ parameters: list[NodeParameter] = self._node_template.parameters.parameters
+        # TODO: another scenario suggesting that property lookups might be preferable.
+ for parameter in parameters:
+ if parameter.name == parameter_name:
+ return parameter
+ return None
+
+ def _get_node_condition_if_exists(self, condition_name: str) -> Optional[NodeCondition]:
+ conditions: list[NodeCondition] = self._node_template.conditions.conditions
+        # TODO: another scenario suggesting that property lookups might be preferable.
+ for condition in conditions:
+ if condition.name == condition_name:
+ return condition
+ return None
+
+ def _resolve_reference(self, logical_id: str) -> PreprocEntityDelta:
+ node_condition = self._get_node_condition_if_exists(condition_name=logical_id)
+ if isinstance(node_condition, NodeCondition):
+ condition_delta = self.visit(node_condition)
+ return condition_delta
+
+ node_parameter = self._get_node_parameter_if_exists(parameter_name=logical_id)
+ if isinstance(node_parameter, NodeParameter):
+ parameter_delta = self.visit(node_parameter)
+ return parameter_delta
+
+ # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
+ node_resource = self._get_node_resource_for(
+ resource_name=logical_id, node_template=self._node_template
+ )
+ resource_delta = self.visit(node_resource)
+ before = resource_delta.before
+ after = resource_delta.after
+ return PreprocEntityDelta(before=before, after=after)
+
+ def _resolve_mapping(
+        self, map_name: str, top_level_key: str, second_level_key: str
+ ) -> PreprocEntityDelta:
+ # TODO: add support for nested intrinsic functions, and KNOWN AFTER APPLY logical ids.
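+        # Fn::FindInMap resolves Mappings[map_name][top_level_key][second_level_key].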
+ node_mapping: NodeMapping = self._get_node_mapping(map_name=map_name)
+ top_level_value = node_mapping.bindings.bindings.get(top_level_key)
+ if not isinstance(top_level_value, NodeObject):
+ raise RuntimeError()
+ second_level_value = top_level_value.bindings.get(second_level_key)
+ mapping_value_delta = self.visit(second_level_value)
+ return mapping_value_delta
+
+ def _resolve_reference_binding(
+ self, before_logical_id: Optional[str], after_logical_id: Optional[str]
+ ) -> PreprocEntityDelta:
+ before = None
+ if before_logical_id is not None:
+ before_delta = self._resolve_reference(logical_id=before_logical_id)
+ before = before_delta.before
+ after = None
+ if after_logical_id is not None:
+ after_delta = self._resolve_reference(logical_id=after_logical_id)
+ after = after_delta.after
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit(self, change_set_entity: ChangeSetEntity) -> PreprocEntityDelta:
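+        # Deltas are cached per scope so that shared subtrees are only processed once.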
+ delta = self._processed.get(change_set_entity.scope)
+ if delta is not None:
+ return delta
+ delta = super().visit(change_set_entity=change_set_entity)
+ self._processed[change_set_entity.scope] = delta
+ return delta
+
+ def visit_terminal_value_modified(
+ self, terminal_value_modified: TerminalValueModified
+ ) -> PreprocEntityDelta:
+ return PreprocEntityDelta(
+ before=terminal_value_modified.value,
+ after=terminal_value_modified.modified_value,
+ )
+
+ def visit_terminal_value_created(
+ self, terminal_value_created: TerminalValueCreated
+ ) -> PreprocEntityDelta:
+ return PreprocEntityDelta(after=terminal_value_created.value)
+
+ def visit_terminal_value_removed(
+ self, terminal_value_removed: TerminalValueRemoved
+ ) -> PreprocEntityDelta:
+ return PreprocEntityDelta(before=terminal_value_removed.value)
+
+ def visit_terminal_value_unchanged(
+ self, terminal_value_unchanged: TerminalValueUnchanged
+ ) -> PreprocEntityDelta:
+ return PreprocEntityDelta(
+ before=terminal_value_unchanged.value,
+ after=terminal_value_unchanged.value,
+ )
+
+ def visit_node_divergence(self, node_divergence: NodeDivergence) -> PreprocEntityDelta:
+ before_delta = self.visit(node_divergence.value)
+ after_delta = self.visit(node_divergence.divergence)
+ return PreprocEntityDelta(before=before_delta.before, after=after_delta.after)
+
+ def visit_node_object(self, node_object: NodeObject) -> PreprocEntityDelta:
+ before = dict()
+ after = dict()
+ for name, change_set_entity in node_object.bindings.items():
+ delta: PreprocEntityDelta = self.visit(change_set_entity=change_set_entity)
+ match change_set_entity.change_type:
+ case ChangeType.MODIFIED:
+ before[name] = delta.before
+ after[name] = delta.after
+ case ChangeType.CREATED:
+ after[name] = delta.after
+ case ChangeType.REMOVED:
+ before[name] = delta.before
+ case ChangeType.UNCHANGED:
+ before[name] = delta.before
+ after[name] = delta.before
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_intrinsic_function_fn_get_att(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ) -> PreprocEntityDelta:
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+ # TODO: validate the return value according to the spec.
+ before_argument_list = arguments_delta.before
+ after_argument_list = arguments_delta.after
+
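+        # Fn::GetAtt arguments take the form [logical_name_of_resource, attribute_name].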
+ before = None
+ if before_argument_list:
+ before_logical_name_of_resource = before_argument_list[0]
+ before_attribute_name = before_argument_list[1]
+ before_node_resource = self._get_node_resource_for(
+ resource_name=before_logical_name_of_resource, node_template=self._node_template
+ )
+ before_node_property = self._get_node_property_for(
+ property_name=before_attribute_name, node_resource=before_node_resource
+ )
+ before_property_delta = self.visit(before_node_property)
+ before = before_property_delta.before
+
+ after = None
+ if after_argument_list:
+ # TODO: when are values only accessible at runtime?
+ after_logical_name_of_resource = after_argument_list[0]
+ after_attribute_name = after_argument_list[1]
+ after_node_resource = self._get_node_resource_for(
+ resource_name=after_logical_name_of_resource, node_template=self._node_template
+ )
+ after_node_property = self._get_node_property_for(
+ property_name=after_attribute_name, node_resource=after_node_resource
+ )
+ after_property_delta = self.visit(after_node_property)
+ after = after_property_delta.after
+
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_intrinsic_function_fn_equals(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ) -> PreprocEntityDelta:
+ # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+ before_values = arguments_delta.before
+ after_values = arguments_delta.after
+ before = None
+ if before_values:
+ before = before_values[0] == before_values[1]
+ after = None
+ if after_values:
+ after = after_values[0] == after_values[1]
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_intrinsic_function_fn_if(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ) -> PreprocEntityDelta:
+ # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+
+ def _compute_delta_for_if_statement(args: list[Any]) -> PreprocEntityDelta:
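+            # Fn::If arguments: [condition_name, value_if_true, value_if_false].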
+ condition_name = args[0]
+ boolean_expression_delta = self._resolve_reference(logical_id=condition_name)
+ return PreprocEntityDelta(
+ before=args[1] if boolean_expression_delta.before else args[2],
+ after=args[1] if boolean_expression_delta.after else args[2],
+ )
+
+ # TODO: add support for this being created or removed.
+ before_outcome_delta = _compute_delta_for_if_statement(arguments_delta.before)
+ before = before_outcome_delta.before
+ after_outcome_delta = _compute_delta_for_if_statement(arguments_delta.after)
+ after = after_outcome_delta.after
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_intrinsic_function_fn_not(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ) -> PreprocEntityDelta:
+ # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
+ # TODO: add type checking/validation for result unit?
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+ before_condition = arguments_delta.before
+ after_condition = arguments_delta.after
+ if before_condition:
+ before_condition_outcome = before_condition[0]
+ before = not before_condition_outcome
+ else:
+ before = None
+
+ if after_condition:
+ after_condition_outcome = after_condition[0]
+ after = not after_condition_outcome
+ else:
+ after = None
+ # Implicit change type computation.
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_intrinsic_function_fn_find_in_map(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ) -> PreprocEntityDelta:
+ # TODO: check for KNOWN AFTER APPLY values for logical ids coming from intrinsic functions as arguments.
+ # TODO: add type checking/validation for result unit?
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+ before_arguments = arguments_delta.before
+ after_arguments = arguments_delta.after
+ if before_arguments:
+ before_value_delta = self._resolve_mapping(*before_arguments)
+ before = before_value_delta.before
+ else:
+ before = None
+ if after_arguments:
+ after_value_delta = self._resolve_mapping(*after_arguments)
+ after = after_value_delta.after
+ else:
+ after = None
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_mapping(self, node_mapping: NodeMapping) -> PreprocEntityDelta:
+ bindings_delta = self.visit(node_mapping.bindings)
+ return bindings_delta
+
+ def visit_node_parameter(self, node_parameter: NodeParameter) -> PreprocEntityDelta:
+ dynamic_value = node_parameter.dynamic_value
+ dynamic_delta = self.visit(dynamic_value)
+
+ default_value = node_parameter.default_value
+ default_delta = self.visit(default_value)
+
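+        # The dynamically supplied value takes precedence over the declared default.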
+ before = dynamic_delta.before or default_delta.before
+ after = dynamic_delta.after or default_delta.after
+
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_condition(self, node_condition: NodeCondition) -> PreprocEntityDelta:
+ delta = self.visit(node_condition.body)
+ return delta
+
+ def _resource_physical_resource_id_from(
+ self, logical_resource_id: str, resolved_resources: dict
+ ) -> Optional[str]:
+ # TODO: typing around resolved resources is needed and should be reflected here.
+ resolved_resource = resolved_resources.get(logical_resource_id)
+ if resolved_resource is None:
+ return None
+ physical_resource_id: Optional[str] = resolved_resource.get("PhysicalResourceId")
+ if not isinstance(physical_resource_id, str):
+ raise RuntimeError(f"No PhysicalResourceId found for resource '{logical_resource_id}'")
+ return physical_resource_id
+
+ def _before_resource_physical_id(self, resource_logical_id: str) -> Optional[str]:
+ # TODO: typing around resolved resources is needed and should be reflected here.
+ return self._resource_physical_resource_id_from(
+ logical_resource_id=resource_logical_id,
+ resolved_resources=self._before_resolved_resources,
+ )
+
+ def _after_resource_physical_id(self, resource_logical_id: str) -> Optional[str]:
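+        # For now, the physical id after deployment is assumed to match the before state.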
+ return self._before_resource_physical_id(resource_logical_id=resource_logical_id)
+
+ def visit_node_intrinsic_function_ref(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ) -> PreprocEntityDelta:
+ arguments_delta = self.visit(node_intrinsic_function.arguments)
+ before_logical_id = arguments_delta.before
+ after_logical_id = arguments_delta.after
+
+ # TODO: extend this to support references to other types.
+ before = None
+ if before_logical_id is not None:
+ before_delta = self._resolve_reference(logical_id=before_logical_id)
+ before = before_delta.before
+ if isinstance(before, PreprocResource):
+ before = before.physical_resource_id
+
+ after = None
+ if after_logical_id is not None:
+ after_delta = self._resolve_reference(logical_id=after_logical_id)
+ after = after_delta.after
+ if isinstance(after, PreprocResource):
+ after = after.physical_resource_id
+
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_array(self, node_array: NodeArray) -> PreprocEntityDelta:
+ before = list()
+ after = list()
+ for change_set_entity in node_array.array:
+ delta: PreprocEntityDelta = self.visit(change_set_entity=change_set_entity)
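+            # Note: the truthiness checks below also drop legitimate falsy values (e.g. empty strings).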
+ if delta.before:
+ before.append(delta.before)
+ if delta.after:
+ after.append(delta.after)
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_property(self, node_property: NodeProperty) -> PreprocEntityDelta:
+ return self.visit(node_property.value)
+
+ def visit_node_properties(
+ self, node_properties: NodeProperties
+ ) -> PreprocEntityDelta[PreprocProperties, PreprocProperties]:
+ before_bindings: dict[str, Any] = dict()
+ after_bindings: dict[str, Any] = dict()
+ for node_property in node_properties.properties:
+ delta = self.visit(node_property)
+ property_name = node_property.name
+ if node_property.change_type != ChangeType.CREATED:
+ before_bindings[property_name] = delta.before
+ if node_property.change_type != ChangeType.REMOVED:
+ after_bindings[property_name] = delta.after
+ before = PreprocProperties(properties=before_bindings)
+ after = PreprocProperties(properties=after_bindings)
+ return PreprocEntityDelta(before=before, after=after)
+
+ def _resolve_resource_condition_reference(self, reference: TerminalValue) -> PreprocEntityDelta:
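+        # A NothingType reference means no condition is attached, which counts as True.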
+ reference_delta = self.visit(reference)
+ before_reference = reference_delta.before
+ after_reference = reference_delta.after
+ condition_delta = self._resolve_reference_binding(
+ before_logical_id=before_reference, after_logical_id=after_reference
+ )
+ before = condition_delta.before if not isinstance(before_reference, NothingType) else True
+ after = condition_delta.after if not isinstance(after_reference, NothingType) else True
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_resource(
+ self, node_resource: NodeResource
+ ) -> PreprocEntityDelta[PreprocResource, PreprocResource]:
+ change_type = node_resource.change_type
+ condition_before = None
+ condition_after = None
+ if node_resource.condition_reference is not None:
+ condition_delta = self._resolve_resource_condition_reference(
+ node_resource.condition_reference
+ )
+ condition_before = condition_delta.before
+ condition_after = condition_delta.after
+
+ type_delta = self.visit(node_resource.type_)
+ properties_delta: PreprocEntityDelta[PreprocProperties, PreprocProperties] = self.visit(
+ node_resource.properties
+ )
+
+ before = None
+ after = None
+        if change_type != ChangeType.CREATED and (condition_before is None or condition_before):
+ logical_resource_id = node_resource.name
+ before_physical_resource_id = self._before_resource_physical_id(
+ resource_logical_id=logical_resource_id
+ )
+ before = PreprocResource(
+ logical_id=logical_resource_id,
+ physical_resource_id=before_physical_resource_id,
+ condition=condition_before,
+ resource_type=type_delta.before,
+ properties=properties_delta.before,
+ )
+        if change_type != ChangeType.REMOVED and (condition_after is None or condition_after):
+ logical_resource_id = node_resource.name
+ after_physical_resource_id = self._after_resource_physical_id(
+ resource_logical_id=logical_resource_id
+ )
+ after = PreprocResource(
+ logical_id=logical_resource_id,
+ physical_resource_id=after_physical_resource_id,
+ condition=condition_after,
+ resource_type=type_delta.after,
+ properties=properties_delta.after,
+ )
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_output(
+ self, node_output: NodeOutput
+ ) -> PreprocEntityDelta[PreprocOutput, PreprocOutput]:
+ change_type = node_output.change_type
+ value_delta = self.visit(node_output.value)
+
+ condition_delta = None
+ if node_output.condition_reference is not None:
+ condition_delta = self._resolve_resource_condition_reference(
+ node_output.condition_reference
+ )
+ condition_before = condition_delta.before
+ condition_after = condition_delta.after
+ if not condition_before and condition_after:
+ change_type = ChangeType.CREATED
+ elif condition_before and not condition_after:
+ change_type = ChangeType.REMOVED
+
+ export_delta = None
+ if node_output.export is not None:
+ export_delta = self.visit(node_output.export)
+
+ before: Optional[PreprocOutput] = None
+ if change_type != ChangeType.CREATED:
+ before = PreprocOutput(
+ name=node_output.name,
+ value=value_delta.before,
+ export=export_delta.before if export_delta else None,
+ condition=condition_delta.before if condition_delta else None,
+ )
+ after: Optional[PreprocOutput] = None
+ if change_type != ChangeType.REMOVED:
+ after = PreprocOutput(
+ name=node_output.name,
+ value=value_delta.after,
+ export=export_delta.after if export_delta else None,
+ condition=condition_delta.after if condition_delta else None,
+ )
+ return PreprocEntityDelta(before=before, after=after)
+
+ def visit_node_outputs(
+ self, node_outputs: NodeOutputs
+ ) -> PreprocEntityDelta[list[PreprocOutput], list[PreprocOutput]]:
+ before: list[PreprocOutput] = list()
+ after: list[PreprocOutput] = list()
+ for node_output in node_outputs.outputs:
+ output_delta: PreprocEntityDelta[PreprocOutput, PreprocOutput] = self.visit(node_output)
+ output_before = output_delta.before
+ output_after = output_delta.after
+ if output_before:
+ before.append(output_before)
+ if output_after:
+ after.append(output_after)
+ return PreprocEntityDelta(before=before, after=after)
diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py
index 39ef67e912313..80b93b820f8de 100644
--- a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py
+++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py
@@ -7,7 +7,11 @@
NodeConditions,
NodeDivergence,
NodeIntrinsicFunction,
+ NodeMapping,
+ NodeMappings,
NodeObject,
+ NodeOutput,
+ NodeOutputs,
NodeParameter,
NodeParameters,
NodeProperties,
@@ -45,6 +49,18 @@ def visit_children(self, change_set_entity: ChangeSetEntity):
def visit_node_template(self, node_template: NodeTemplate):
self.visit_children(node_template)
+ def visit_node_outputs(self, node_outputs: NodeOutputs):
+ self.visit_children(node_outputs)
+
+ def visit_node_output(self, node_output: NodeOutput):
+ self.visit_children(node_output)
+
+ def visit_node_mapping(self, node_mapping: NodeMapping):
+ self.visit_children(node_mapping)
+
+ def visit_node_mappings(self, node_mappings: NodeMappings):
+ self.visit_children(node_mappings)
+
def visit_node_parameters(self, node_parameters: NodeParameters):
self.visit_children(node_parameters)
@@ -94,6 +110,11 @@ def visit_node_intrinsic_function_fn_if(self, node_intrinsic_function: NodeIntri
def visit_node_intrinsic_function_fn_not(self, node_intrinsic_function: NodeIntrinsicFunction):
self.visit_children(node_intrinsic_function)
+ def visit_node_intrinsic_function_fn_find_in_map(
+ self, node_intrinsic_function: NodeIntrinsicFunction
+ ):
+ self.visit_children(node_intrinsic_function)
+
def visit_node_intrinsic_function_ref(self, node_intrinsic_function: NodeIntrinsicFunction):
self.visit_children(node_intrinsic_function)
diff --git a/localstack-core/localstack/services/cloudformation/resource_provider.py b/localstack-core/localstack/services/cloudformation/resource_provider.py
index 04d7e8f60b4c8..7e48ed8ca5703 100644
--- a/localstack-core/localstack/services/cloudformation/resource_provider.py
+++ b/localstack-core/localstack/services/cloudformation/resource_provider.py
@@ -444,9 +444,7 @@ def deploy_loop(
max_iterations = max(ceil(max_timeout / sleep_time), 2)
for current_iteration in range(max_iterations):
- resource_type = get_resource_type(
- {"Type": raw_payload["resourceType"]}
- ) # TODO: simplify signature of get_resource_type to just take the type
+ resource_type = get_resource_type({"Type": raw_payload["resourceType"]})
resource["SpecifiedProperties"] = raw_payload["requestData"]["resourceProperties"]
try:
diff --git a/localstack-core/localstack/services/cloudformation/stores.py b/localstack-core/localstack/services/cloudformation/stores.py
index 11c8fa0cbb879..7191f5491b4e1 100644
--- a/localstack-core/localstack/services/cloudformation/stores.py
+++ b/localstack-core/localstack/services/cloudformation/stores.py
@@ -3,6 +3,8 @@
from localstack.aws.api.cloudformation import StackStatus
from localstack.services.cloudformation.engine.entities import Stack, StackChangeSet, StackSet
+from localstack.services.cloudformation.v2.entities import ChangeSet as ChangeSetV2
+from localstack.services.cloudformation.v2.entities import Stack as StackV2
from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute
LOG = logging.getLogger(__name__)
@@ -11,6 +13,9 @@
class CloudFormationStore(BaseStore):
# maps stack ID to stack details
stacks: dict[str, Stack] = LocalAttribute(default=dict)
+ stacks_v2: dict[str, StackV2] = LocalAttribute(default=dict)
+
+ change_sets: dict[str, ChangeSetV2] = LocalAttribute(default=dict)
# maps stack set ID to stack set details
stack_sets: dict[str, StackSet] = LocalAttribute(default=dict)
diff --git a/localstack-core/localstack/services/cloudformation/v2/entities.py b/localstack-core/localstack/services/cloudformation/v2/entities.py
new file mode 100644
index 0000000000000..31de16b69613e
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/v2/entities.py
@@ -0,0 +1,221 @@
+from datetime import datetime, timezone
+from typing import TypedDict
+
+from localstack.aws.api.cloudformation import (
+ Changes,
+ ChangeSetStatus,
+ ChangeSetType,
+ CreateChangeSetInput,
+ DescribeChangeSetOutput,
+ ExecutionStatus,
+ Output,
+ Parameter,
+ StackDriftInformation,
+ StackDriftStatus,
+ StackStatus,
+ StackStatusReason,
+)
+from localstack.aws.api.cloudformation import (
+ Stack as ApiStack,
+)
+from localstack.services.cloudformation.engine.entities import (
+ StackIdentifier,
+ StackTemplate,
+)
+from localstack.services.cloudformation.engine.v2.change_set_model import (
+ ChangeSetModel,
+ NodeTemplate,
+)
+from localstack.services.cloudformation.engine.v2.change_set_model_describer import (
+ ChangeSetModelDescriber,
+)
+from localstack.utils.aws import arns
+from localstack.utils.strings import short_uid
+
+
+class ResolvedResource(TypedDict):
+ Properties: dict
+
+
+class Stack:
+ stack_name: str
+ parameters: list[Parameter]
+ change_set_id: str | None
+ change_set_name: str | None
+ status: StackStatus
+ status_reason: StackStatusReason | None
+ stack_id: str
+ creation_time: datetime
+
+ # state after deploy
+ resolved_parameters: dict[str, str]
+ resolved_resources: dict[str, ResolvedResource]
+ resolved_outputs: dict[str, str]
+
+ def __init__(
+ self,
+ account_id: str,
+ region_name: str,
+ request_payload: CreateChangeSetInput,
+ template: StackTemplate | None = None,
+ template_body: str | None = None,
+ change_set_ids: list[str] | None = None,
+ ):
+ self.account_id = account_id
+ self.region_name = region_name
+ self.template = template
+ self.template_body = template_body
+ self.status = StackStatus.CREATE_IN_PROGRESS
+        self.status_reason = None
+        self.change_set_id = None
+        self.change_set_ids = change_set_ids or []
+ self.creation_time = datetime.now(tz=timezone.utc)
+
+ self.stack_name = request_payload["StackName"]
+ self.change_set_name = request_payload.get("ChangeSetName")
+ self.parameters = request_payload.get("Parameters", [])
+ self.stack_id = arns.cloudformation_stack_arn(
+ self.stack_name,
+ stack_id=StackIdentifier(
+ account_id=self.account_id, region=self.region_name, stack_name=self.stack_name
+ ).generate(tags=request_payload.get("Tags")),
+ account_id=self.account_id,
+ region_name=self.region_name,
+ )
+
+ # TODO: only kept for v1 compatibility
+ self.request_payload = request_payload
+
+ # state after deploy
+ self.resolved_parameters = {}
+ self.resolved_resources = {}
+ self.resolved_outputs = {}
+
+ def set_stack_status(self, status: StackStatus, reason: StackStatusReason | None = None):
+ self.status = status
+ if reason:
+ self.status_reason = reason
+
+ def describe_details(self) -> ApiStack:
+ result = {
+ "ChangeSetId": self.change_set_id,
+ "CreationTime": self.creation_time,
+ "StackId": self.stack_id,
+ "StackName": self.stack_name,
+ "StackStatus": self.status,
+ "StackStatusReason": self.status_reason,
+ # fake values
+ "DisableRollback": False,
+ "DriftInformation": StackDriftInformation(
+ StackDriftStatus=StackDriftStatus.NOT_CHECKED
+ ),
+ "EnableTerminationProtection": False,
+ "LastUpdatedTime": self.creation_time,
+ "RollbackConfiguration": {},
+ "Tags": [],
+ }
+ if self.resolved_outputs:
+ describe_outputs = []
+ for key, value in self.resolved_outputs.items():
+ describe_outputs.append(
+ Output(
+ # TODO(parity): Description, ExportName
+ # TODO(parity): what happens on describe stack when the stack has not been deployed yet?
+ OutputKey=key,
+ OutputValue=value,
+ )
+ )
+ result["Outputs"] = describe_outputs
+ return result
+
+
+class ChangeSet:
+ change_set_name: str
+ change_set_id: str
+ change_set_type: ChangeSetType
+ update_graph: NodeTemplate | None
+ status: ChangeSetStatus
+ execution_status: ExecutionStatus
+ creation_time: datetime
+
+ def __init__(
+ self,
+ stack: Stack,
+ request_payload: CreateChangeSetInput,
+ template: StackTemplate | None = None,
+ ):
+ self.stack = stack
+ self.template = template
+ self.status = ChangeSetStatus.CREATE_IN_PROGRESS
+ self.execution_status = ExecutionStatus.AVAILABLE
+ self.update_graph = None
+ self.creation_time = datetime.now(tz=timezone.utc)
+
+ self.change_set_name = request_payload["ChangeSetName"]
+ self.change_set_type = request_payload.get("ChangeSetType", ChangeSetType.UPDATE)
+ self.change_set_id = arns.cloudformation_change_set_arn(
+ self.change_set_name,
+ change_set_id=short_uid(),
+ account_id=self.stack.account_id,
+ region_name=self.stack.region_name,
+ )
+
+ def set_change_set_status(self, status: ChangeSetStatus):
+ self.status = status
+
+ def set_execution_status(self, execution_status: ExecutionStatus):
+ self.execution_status = execution_status
+
+ @property
+ def account_id(self) -> str:
+ return self.stack.account_id
+
+ @property
+ def region_name(self) -> str:
+ return self.stack.region_name
+
+ def populate_update_graph(
+ self,
+ before_template: dict | None = None,
+ after_template: dict | None = None,
+ before_parameters: dict | None = None,
+ after_parameters: dict | None = None,
+ ) -> None:
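+        # The resulting update graph backs both DescribeChangeSet and ExecuteChangeSet.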
+ change_set_model = ChangeSetModel(
+ before_template=before_template,
+ after_template=after_template,
+ before_parameters=before_parameters,
+ after_parameters=after_parameters,
+ )
+ self.update_graph = change_set_model.get_update_model()
+
+ def describe_details(self, include_property_values: bool) -> DescribeChangeSetOutput:
+ change_set_describer = ChangeSetModelDescriber(
+ node_template=self.update_graph,
+ before_resolved_resources=self.stack.resolved_resources,
+ include_property_values=include_property_values,
+ )
+ changes: Changes = change_set_describer.get_changes()
+
+ result = {
+ "Status": self.status,
+ "ChangeSetType": self.change_set_type,
+ "ChangeSetId": self.change_set_id,
+ "ChangeSetName": self.change_set_name,
+ "ExecutionStatus": self.execution_status,
+ "RollbackConfiguration": {},
+ "StackId": self.stack.stack_id,
+ "StackName": self.stack.stack_name,
+ "StackStatus": self.stack.status,
+ "CreationTime": self.creation_time,
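+            # fake values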
+ "LastUpdatedTime": "",
+ "DisableRollback": "",
+ "EnableTerminationProtection": "",
+ "Transform": "",
+ # TODO: mask no echo
+ "Parameters": [
+ Parameter(ParameterKey=key, ParameterValue=value)
+ for (key, value) in self.stack.resolved_parameters.items()
+ ],
+ "Changes": changes,
+ }
+ return result
diff --git a/localstack-core/localstack/services/cloudformation/v2/provider.py b/localstack-core/localstack/services/cloudformation/v2/provider.py
index 9017a0c696f51..f2a8afe509f19 100644
--- a/localstack-core/localstack/services/cloudformation/v2/provider.py
+++ b/localstack-core/localstack/services/cloudformation/v2/provider.py
@@ -1,45 +1,95 @@
-from copy import deepcopy
+import logging
+from typing import Any
from localstack.aws.api import RequestContext, handler
from localstack.aws.api.cloudformation import (
ChangeSetNameOrId,
ChangeSetNotFoundException,
+ ChangeSetStatus,
ChangeSetType,
+ ClientRequestToken,
CreateChangeSetInput,
CreateChangeSetOutput,
+ DeletionMode,
DescribeChangeSetOutput,
+ DescribeStackEventsOutput,
+ DescribeStacksOutput,
+ DisableRollback,
+ ExecuteChangeSetOutput,
+ ExecutionStatus,
IncludePropertyValues,
+ InvalidChangeSetStatusException,
NextToken,
Parameter,
+ RetainExceptOnCreate,
+ RetainResources,
+ RoleARN,
+ StackName,
StackNameOrId,
StackStatus,
)
from localstack.services.cloudformation import api_utils
-from localstack.services.cloudformation.engine import parameters as param_resolver
-from localstack.services.cloudformation.engine import template_deployer, template_preparer
-from localstack.services.cloudformation.engine.entities import Stack, StackChangeSet
-from localstack.services.cloudformation.engine.parameters import mask_no_echo, strip_parameter_type
-from localstack.services.cloudformation.engine.resource_ordering import (
- NoResourceInStack,
- order_resources,
-)
-from localstack.services.cloudformation.engine.template_utils import resolve_stack_conditions
-from localstack.services.cloudformation.engine.v2.change_set_model_describer import (
- ChangeSetModelDescriber,
+from localstack.services.cloudformation.engine import template_preparer
+from localstack.services.cloudformation.engine.v2.change_set_model_executor import (
+ ChangeSetModelExecutor,
)
from localstack.services.cloudformation.engine.validations import ValidationError
from localstack.services.cloudformation.provider import (
ARN_CHANGESET_REGEX,
ARN_STACK_REGEX,
CloudformationProvider,
- clone_stack_params,
)
from localstack.services.cloudformation.stores import (
- find_change_set,
- find_stack,
+ CloudFormationStore,
get_cloudformation_store,
)
-from localstack.utils.collections import remove_attributes
+from localstack.services.cloudformation.v2.entities import ChangeSet, Stack
+from localstack.utils.threads import start_worker_thread
+
+LOG = logging.getLogger(__name__)
+
+
+def is_stack_arn(stack_name_or_id: str) -> bool:
+ return ARN_STACK_REGEX.match(stack_name_or_id) is not None
+
+
+def is_changeset_arn(change_set_name_or_id: str) -> bool:
+ return ARN_CHANGESET_REGEX.match(change_set_name_or_id) is not None
+
+
+def find_change_set_v2(
+ state: CloudFormationStore, change_set_name: str, stack_name: str | None = None
+) -> ChangeSet | None:
+ change_set: ChangeSet | None = None
+ if is_changeset_arn(change_set_name):
+        change_set = state.change_sets.get(change_set_name)
+ else:
+ if stack_name is not None:
+ stack: Stack | None = None
+ if is_stack_arn(stack_name):
+ stack = state.stacks_v2[stack_name]
+ else:
+ for stack_candidate in state.stacks_v2.values():
+ # TODO: check for active stacks
+ if (
+ stack_candidate.stack_name == stack_name
+                    and stack_candidate.status != StackStatus.DELETE_COMPLETE
+ ):
+ stack = stack_candidate
+ break
+
+ if not stack:
+ raise NotImplementedError(f"no stack found for change set {change_set_name}")
+
+ for change_set_id in stack.change_set_ids:
+ change_set_candidate = state.change_sets[change_set_id]
+ if change_set_candidate.change_set_name == change_set_name:
+ change_set = change_set_candidate
+ break
+ else:
+ raise NotImplementedError
+
+ return change_set
class CloudformationProviderV2(CloudformationProvider):
@@ -47,15 +97,23 @@ class CloudformationProviderV2(CloudformationProvider):
def create_change_set(
self, context: RequestContext, request: CreateChangeSetInput
) -> CreateChangeSetOutput:
+ try:
+ stack_name = request["StackName"]
+ except KeyError:
+ # TODO: proper exception
+ raise ValidationError("StackName must be specified")
+ try:
+ change_set_name = request["ChangeSetName"]
+ except KeyError:
+ # TODO: proper exception
+            raise ValidationError("ChangeSetName must be specified")
+
state = get_cloudformation_store(context.account_id, context.region)
- req_params = request
- change_set_type = req_params.get("ChangeSetType", "UPDATE")
- stack_name = req_params.get("StackName")
- change_set_name = req_params.get("ChangeSetName")
- template_body = req_params.get("TemplateBody")
+ change_set_type = request.get("ChangeSetType", "UPDATE")
+ template_body = request.get("TemplateBody")
# s3 or secretsmanager url
- template_url = req_params.get("TemplateURL")
+ template_url = request.get("TemplateURL")
# validate and resolve template
if template_body and template_url:
@@ -68,29 +126,19 @@ def create_change_set(
"Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
) # TODO: check proper message
- api_utils.prepare_template_body(
- req_params
- ) # TODO: function has too many unclear responsibilities
- if not template_body:
- template_body = req_params[
- "TemplateBody"
- ] # should then have been set by prepare_template_body
- template = template_preparer.parse_template(req_params["TemplateBody"])
-
- del req_params["TemplateBody"] # TODO: stop mutating req_params
- template["StackName"] = stack_name
- # TODO: validate with AWS what this is actually doing?
- template["ChangeSetName"] = change_set_name
+ template_body = api_utils.extract_template_body(request)
+ structured_template = template_preparer.parse_template(template_body)
# this is intentionally not in a util yet. Let's first see how the different operations deal with these before generalizing
# handle ARN stack_name here (not valid for initial CREATE, since stack doesn't exist yet)
- if ARN_STACK_REGEX.match(stack_name):
- if not (stack := state.stacks.get(stack_name)):
+ if is_stack_arn(stack_name):
+ stack = state.stacks_v2.get(stack_name)
+ if not stack:
raise ValidationError(f"Stack '{stack_name}' does not exist.")
else:
# stack name specified, so fetch the stack by name
stack_candidates: list[Stack] = [
- s for stack_arn, s in state.stacks.items() if s.stack_name == stack_name
+ s for stack_arn, s in state.stacks_v2.items() if s.stack_name == stack_name
]
active_stack_candidates = [
s for s in stack_candidates if self._stack_status_is_active(s.status)
@@ -98,23 +146,21 @@ def create_change_set(
# on a CREATE an empty Stack should be generated if we didn't find an active one
if not active_stack_candidates and change_set_type == ChangeSetType.CREATE:
- empty_stack_template = dict(template)
- empty_stack_template["Resources"] = {}
- req_params_copy = clone_stack_params(req_params)
stack = Stack(
context.account_id,
context.region,
- req_params_copy,
- empty_stack_template,
+ request,
+ structured_template,
template_body=template_body,
)
- state.stacks[stack.stack_id] = stack
- stack.set_stack_status("REVIEW_IN_PROGRESS")
+ state.stacks_v2[stack.stack_id] = stack
else:
if not active_stack_candidates:
raise ValidationError(f"Stack '{stack_name}' does not exist.")
stack = active_stack_candidates[0]
+ stack.set_stack_status(StackStatus.REVIEW_IN_PROGRESS)
+
# TODO: test if rollback status is allowed as well
if (
change_set_type == ChangeSetType.CREATE
@@ -124,14 +170,15 @@ def create_change_set(
f"Stack [{stack_name}] already exists and cannot be created again with the changeSet [{change_set_name}]."
)
- old_parameters: dict[str, Parameter] = {}
+ before_parameters: dict[str, Parameter] | None = None
match change_set_type:
case ChangeSetType.UPDATE:
+ before_parameters = stack.resolved_parameters
# add changeset to existing stack
- old_parameters = {
- k: mask_no_echo(strip_parameter_type(v))
- for k, v in stack.resolved_parameters.items()
- }
case ChangeSetType.IMPORT:
raise NotImplementedError() # TODO: implement importing resources
case ChangeSetType.CREATE:
@@ -143,139 +190,199 @@ def create_change_set(
)
raise ValidationError(msg)
- # resolve parameters
- new_parameters: dict[str, Parameter] = param_resolver.convert_stack_parameters_to_dict(
- request.get("Parameters")
- )
- parameter_declarations = param_resolver.extract_stack_parameter_declarations(template)
- resolved_parameters = param_resolver.resolve_parameters(
- account_id=context.account_id,
- region_name=context.region,
- parameter_declarations=parameter_declarations,
- new_parameters=new_parameters,
- old_parameters=old_parameters,
- )
+        # TODO: transformations
- # TODO: remove this when fixing Stack.resources and transformation order
- # currently we need to create a stack with existing resources + parameters so that resolve refs recursively in here will work.
- # The correct way to do it would be at a later stage anyway just like a normal intrinsic function
- req_params_copy = clone_stack_params(req_params)
- temp_stack = Stack(context.account_id, context.region, req_params_copy, template)
- temp_stack.set_resolved_parameters(resolved_parameters)
-
- # TODO: everything below should be async
- # apply template transformations
- transformed_template = template_preparer.transform_template(
- context.account_id,
- context.region,
- template,
- stack_name=temp_stack.stack_name,
- resources=temp_stack.resources,
- mappings=temp_stack.mappings,
- conditions={}, # TODO: we don't have any resolved conditions yet at this point but we need the conditions because of the samtranslator...
- resolved_parameters=resolved_parameters,
- )
+ # TODO: reconsider the way parameters are modelled in the update graph process.
+        # The options might reduce to keeping the current style, or passing the extra information
+        # as a metadata object. The choice should be made considering whether the extra information
+        # is needed while building the update graph, or is only looked up in downstream tasks (metadata).
+ request_parameters = request.get("Parameters", list())
+ # TODO: handle parameter defaults and resolution
+ after_parameters: dict[str, Any] = {
+ parameter["ParameterKey"]: parameter["ParameterValue"]
+ for parameter in request_parameters
+ }
+
+ # TODO: update this logic to always pass the clean template object if one exists. The
+        # current issue with relying on stack.template_original is that this appears to have
+ # its parameters and conditions populated.
+ before_template = None
+ if change_set_type == ChangeSetType.UPDATE:
+ before_template = stack.template
+ after_template = structured_template
# create change set for the stack and apply changes
- change_set = StackChangeSet(
- context.account_id, context.region, stack, req_params, transformed_template
- )
+ change_set = ChangeSet(stack, request)
+
# only set parameters for the changeset, then switch to stack on execute_change_set
- change_set.template_body = template_body
- change_set.populate_update_graph(stack.template, transformed_template)
-
- # TODO: evaluate conditions
- raw_conditions = transformed_template.get("Conditions", {})
- resolved_stack_conditions = resolve_stack_conditions(
- account_id=context.account_id,
- region_name=context.region,
- conditions=raw_conditions,
- parameters=resolved_parameters,
- mappings=temp_stack.mappings,
- stack_name=stack_name,
+ change_set.populate_update_graph(
+ before_template=before_template,
+ after_template=after_template,
+ before_parameters=before_parameters,
+ after_parameters=after_parameters,
)
- change_set.set_resolved_stack_conditions(resolved_stack_conditions)
+ change_set.set_change_set_status(ChangeSetStatus.CREATE_COMPLETE)
+        stack.change_set_id = change_set.change_set_id
+        stack.change_set_ids.append(change_set.change_set_id)
+ state.change_sets[change_set.change_set_id] = change_set
- # a bit gross but use the template ordering to validate missing resources
- try:
- order_resources(
- transformed_template["Resources"],
- resolved_parameters=resolved_parameters,
- resolved_conditions=resolved_stack_conditions,
+ return CreateChangeSetOutput(StackId=stack.stack_id, Id=change_set.change_set_id)
+
+ @handler("ExecuteChangeSet")
+ def execute_change_set(
+ self,
+ context: RequestContext,
+ change_set_name: ChangeSetNameOrId,
+ stack_name: StackNameOrId | None = None,
+ client_request_token: ClientRequestToken | None = None,
+ disable_rollback: DisableRollback | None = None,
+ retain_except_on_create: RetainExceptOnCreate | None = None,
+ **kwargs,
+ ) -> ExecuteChangeSetOutput:
+ state = get_cloudformation_store(context.account_id, context.region)
+
+ change_set = find_change_set_v2(state, change_set_name, stack_name)
+ if not change_set:
+ raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
+
+ if change_set.execution_status != ExecutionStatus.AVAILABLE:
+ LOG.debug("Change set %s not in execution status 'AVAILABLE'", change_set_name)
+ raise InvalidChangeSetStatusException(
+ f"ChangeSet [{change_set.change_set_id}] cannot be executed in its current status of [{change_set.status}]"
)
- except NoResourceInStack as e:
- raise ValidationError(str(e)) from e
+ if not change_set.update_graph:
+ raise RuntimeError("Programming error: no update graph found for change set")
- deployer = template_deployer.TemplateDeployer(
- context.account_id, context.region, change_set
+ change_set.set_execution_status(ExecutionStatus.EXECUTE_IN_PROGRESS)
+ change_set.stack.set_stack_status(
+ StackStatus.UPDATE_IN_PROGRESS
+ if change_set.change_set_type == ChangeSetType.UPDATE
+ else StackStatus.CREATE_IN_PROGRESS
)
- changes = deployer.construct_changes(
- stack,
+
+ change_set_executor = ChangeSetModelExecutor(
change_set,
- change_set_id=change_set.change_set_id,
- append_to_changeset=True,
- filter_unchanged_resources=True,
)
- stack.change_sets.append(change_set)
- if not changes:
- change_set.metadata["Status"] = "FAILED"
- change_set.metadata["ExecutionStatus"] = "UNAVAILABLE"
- change_set.metadata["StatusReason"] = (
- "The submitted information didn't contain changes. Submit different information to create a change set."
- )
- else:
- change_set.metadata["Status"] = (
- "CREATE_COMPLETE" # technically for some time this should first be CREATE_PENDING
- )
- change_set.metadata["ExecutionStatus"] = (
- "AVAILABLE" # technically for some time this should first be UNAVAILABLE
- )
- return CreateChangeSetOutput(StackId=change_set.stack_id, Id=change_set.change_set_id)
+ def _run(*args):
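+            # Runs on a worker thread; stack and change set statuses flip to COMPLETE only after the executor finishes.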
+ result = change_set_executor.execute()
+ new_stack_status = StackStatus.UPDATE_COMPLETE
+ if change_set.change_set_type == ChangeSetType.CREATE:
+ new_stack_status = StackStatus.CREATE_COMPLETE
+ change_set.stack.set_stack_status(new_stack_status)
+ change_set.set_execution_status(ExecutionStatus.EXECUTE_COMPLETE)
+ change_set.stack.resolved_resources = result.resources
+ change_set.stack.resolved_parameters = result.parameters
+ change_set.stack.resolved_outputs = result.outputs
+
+ start_worker_thread(_run)
+
+ return ExecuteChangeSetOutput()
@handler("DescribeChangeSet")
def describe_change_set(
self,
context: RequestContext,
change_set_name: ChangeSetNameOrId,
- stack_name: StackNameOrId = None,
- next_token: NextToken = None,
- include_property_values: IncludePropertyValues = None,
+ stack_name: StackNameOrId | None = None,
+ next_token: NextToken | None = None,
+ include_property_values: IncludePropertyValues | None = None,
**kwargs,
) -> DescribeChangeSetOutput:
# TODO add support for include_property_values
# only relevant if change_set_name isn't an ARN
- if not ARN_CHANGESET_REGEX.match(change_set_name):
- if not stack_name:
- raise ValidationError(
- "StackName must be specified if ChangeSetName is not specified as an ARN."
- )
-
- stack = find_stack(context.account_id, context.region, stack_name)
- if not stack:
- raise ValidationError(f"Stack [{stack_name}] does not exist")
-
- change_set = find_change_set(
- context.account_id, context.region, change_set_name, stack_name=stack_name
- )
+ state = get_cloudformation_store(context.account_id, context.region)
+ change_set = find_change_set_v2(state, change_set_name, stack_name)
if not change_set:
raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
- change_set_describer = ChangeSetModelDescriber(node_template=change_set.update_graph)
- resource_changes = change_set_describer.get_resource_changes()
-
- attrs = [
- "ChangeSetType",
- "StackStatus",
- "LastUpdatedTime",
- "DisableRollback",
- "EnableTerminationProtection",
- "Transform",
- ]
- result = remove_attributes(deepcopy(change_set.metadata), attrs)
- # TODO: replace this patch with a better solution
- result["Parameters"] = [
- mask_no_echo(strip_parameter_type(p)) for p in result.get("Parameters", [])
- ]
- result["Changes"] = resource_changes
+ result = change_set.describe_details(
+ include_property_values=include_property_values or False
+ )
return result
+
+ @handler("DescribeStacks")
+ def describe_stacks(
+ self,
+ context: RequestContext,
+ stack_name: StackName = None,
+ next_token: NextToken = None,
+ **kwargs,
+ ) -> DescribeStacksOutput:
+ state = get_cloudformation_store(context.account_id, context.region)
+ if stack_name:
+ if is_stack_arn(stack_name):
+ stack = state.stacks_v2[stack_name]
+ else:
+ stack_candidates = []
+ for stack in state.stacks_v2.values():
+ if (
+ stack.stack_name == stack_name
+ and stack.status != StackStatus.DELETE_COMPLETE
+ ):
+ stack_candidates.append(stack)
+ if len(stack_candidates) == 0:
+ raise ValidationError(f"No stack with name {stack_name} found")
+ elif len(stack_candidates) > 1:
+                raise RuntimeError("Programming error: duplicate stacks found")
+ else:
+ stack = stack_candidates[0]
+ else:
+ raise NotImplementedError
+
+ return DescribeStacksOutput(Stacks=[stack.describe_details()])
+
+ @handler("DescribeStackEvents")
+ def describe_stack_events(
+ self,
+ context: RequestContext,
+ stack_name: StackName = None,
+ next_token: NextToken = None,
+ **kwargs,
+ ) -> DescribeStackEventsOutput:
+ return DescribeStackEventsOutput(StackEvents=[])
+
+ @handler("DeleteStack")
+ def delete_stack(
+ self,
+ context: RequestContext,
+ stack_name: StackName,
+ retain_resources: RetainResources = None,
+ role_arn: RoleARN = None,
+ client_request_token: ClientRequestToken = None,
+ deletion_mode: DeletionMode = None,
+ **kwargs,
+ ) -> None:
+ state = get_cloudformation_store(context.account_id, context.region)
+ if stack_name:
+ if is_stack_arn(stack_name):
+            stack = state.stacks_v2.get(stack_name)
+ else:
+ stack_candidates = []
+ for stack in state.stacks_v2.values():
+ if (
+ stack.stack_name == stack_name
+ and stack.status != StackStatus.DELETE_COMPLETE
+ ):
+ stack_candidates.append(stack)
+ if len(stack_candidates) == 0:
+ raise ValidationError(f"No stack with name {stack_name} found")
+ elif len(stack_candidates) > 1:
+                raise RuntimeError("Programming error: duplicate stacks found")
+ else:
+ stack = stack_candidates[0]
+ else:
+ raise NotImplementedError
+
+ if not stack:
+            # AWS will silently ignore invalid stack names - we should do the same
+ return
+
+ # TODO: actually delete
+ stack.set_stack_status(StackStatus.DELETE_COMPLETE)
diff --git a/localstack-core/localstack/services/cloudformation/v2/utils.py b/localstack-core/localstack/services/cloudformation/v2/utils.py
new file mode 100644
index 0000000000000..02a6cbb971a99
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/v2/utils.py
@@ -0,0 +1,5 @@
+from localstack import config
+
+
+def is_v2_engine() -> bool:
+ return config.SERVICE_PROVIDER_CONFIG.get_provider("cloudformation") == "engine-v2"
diff --git a/localstack-core/localstack/services/ec2/patches.py b/localstack-core/localstack/services/ec2/patches.py
index d9db4cad11e08..d2037015905ef 100644
--- a/localstack-core/localstack/services/ec2/patches.py
+++ b/localstack-core/localstack/services/ec2/patches.py
@@ -2,7 +2,7 @@
from typing import Optional
from moto.ec2 import models as ec2_models
-from moto.utilities.id_generator import TAG_KEY_CUSTOM_ID, Tags
+from moto.utilities.id_generator import Tags
from localstack.services.ec2.exceptions import (
InvalidSecurityGroupDuplicateCustomIdError,
@@ -29,6 +29,16 @@ def generate_vpc_id(
return ""
+@localstack_id
+def generate_security_group_id(
+ resource_identifier: ResourceIdentifier,
+ existing_ids: ExistingIds = None,
+ tags: Tags = None,
+) -> str:
+    # We return an empty string here to differentiate between the case where a custom ID was used and the case where the ID was randomly generated by `moto`.
+ return ""
+
+
@localstack_id
def generate_subnet_id(
resource_identifier: ResourceIdentifier,
@@ -54,6 +64,19 @@ def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str:
)
+class SecurityGroupIdentifier(ResourceIdentifier):
+ service = "ec2"
+ resource = "securitygroup"
+
+ def __init__(self, account_id: str, region: str, vpc_id: str, group_name: str):
+ super().__init__(account_id, region, name=f"sg-{vpc_id}-{group_name}")
+
+ def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str:
+ return generate_security_group_id(
+ resource_identifier=self, existing_ids=existing_ids, tags=tags
+ )
+
+
class SubnetIdentifier(ResourceIdentifier):
service = "ec2"
resource = "subnet"
@@ -78,15 +101,22 @@ def ec2_create_subnet(
tags: Optional[dict[str, str]] = None,
**kwargs,
):
+ # Patch this method so that we can create a subnet with a specific "custom"
+ # ID. The custom ID that we will use is contained within a special tag.
vpc_id: str = args[0] if len(args) >= 1 else kwargs["vpc_id"]
cidr_block: str = args[1] if len(args) >= 1 else kwargs["cidr_block"]
resource_identifier = SubnetIdentifier(
self.account_id, self.region_name, vpc_id, cidr_block
)
- # tags has the format: {"subnet": {"Key": ..., "Value": ...}}
+
+ # tags has the format: {"subnet": {"Key": ..., "Value": ...}}, but we need
+ # to pass this to the generate method as {"Key": ..., "Value": ...}. Take
+ # care not to alter the original tags dict otherwise moto will not be able
+ # to understand it.
+ subnet_tags = None
if tags is not None:
- tags = tags.get("subnet", tags)
- custom_id = resource_identifier.generate(tags=tags)
+ subnet_tags = tags.get("subnet", tags)
+ custom_id = resource_identifier.generate(tags=subnet_tags)
if custom_id:
# Check if custom id is unique within a given VPC
@@ -102,9 +132,16 @@ def ec2_create_subnet(
if custom_id:
# Remove the subnet from the default dict and add it back with the custom id
self.subnets[availability_zone].pop(result.id)
+ old_id = result.id
result.id = custom_id
self.subnets[availability_zone][custom_id] = result
+ # Tags are not stored in the Subnet object, but instead stored in a separate
+ # dict in the EC2 backend, keyed by subnet id. That therefore requires
+ # updating as well.
+ if old_id in self.tags:
+ self.tags[custom_id] = self.tags.pop(old_id)
+
# Return the subnet with the patched custom id
return result
@@ -112,29 +149,40 @@ def ec2_create_subnet(
def ec2_create_security_group(
fn: ec2_models.security_groups.SecurityGroupBackend.create_security_group,
self: ec2_models.security_groups.SecurityGroupBackend,
+ name: str,
*args,
+ vpc_id: Optional[str] = None,
tags: Optional[dict[str, str]] = None,
force: bool = False,
**kwargs,
):
- # Extract tags and custom ID
- tags: dict[str, str] = tags or {}
- custom_id = tags.get(TAG_KEY_CUSTOM_ID)
+ vpc_id = vpc_id or self.default_vpc.id
+ resource_identifier = SecurityGroupIdentifier(
+ self.account_id, self.region_name, vpc_id, name
+ )
+ custom_id = resource_identifier.generate(tags=tags)
if not force and self.get_security_group_from_id(custom_id):
raise InvalidSecurityGroupDuplicateCustomIdError(custom_id)
# Generate security group with moto library
result: ec2_models.security_groups.SecurityGroup = fn(
- self, *args, tags=tags, force=force, **kwargs
+ self, name, *args, vpc_id=vpc_id, tags=tags, force=force, **kwargs
)
if custom_id:
# Remove the security group from the default dict and add it back with the custom id
self.groups[result.vpc_id].pop(result.group_id)
+ old_id = result.group_id
result.group_id = result.id = custom_id
self.groups[result.vpc_id][custom_id] = result
+ # Tags are not stored in the Security Group object, but instead are stored in a
+ # separate dict in the EC2 backend, keyed by id. That therefore requires
+ # updating as well.
+ if old_id in self.tags:
+ self.tags[custom_id] = self.tags.pop(old_id)
+
return result
@patch(ec2_models.vpcs.VPCBackend.create_vpc)
@@ -175,9 +223,16 @@ def ec2_create_vpc(
# Remove the VPC from the default dict and add it back with the custom id
self.vpcs.pop(vpc_id)
+ old_id = result.id
result.id = custom_id
self.vpcs[custom_id] = result
+ # Tags are not stored in the VPC object, but instead stored in a separate
+ # dict in the EC2 backend, keyed by VPC id. That therefore requires
+ # updating as well.
+ if old_id in self.tags:
+ self.tags[custom_id] = self.tags.pop(old_id)
+
# Create default network ACL, route table, and security group for custom ID VPC
self.create_route_table(
vpc_id=custom_id,
diff --git a/localstack-core/localstack/services/events/api_destination.py b/localstack-core/localstack/services/events/api_destination.py
index a7fe116eaed21..0bb9f097ffb4b 100644
--- a/localstack-core/localstack/services/events/api_destination.py
+++ b/localstack-core/localstack/services/events/api_destination.py
@@ -64,6 +64,23 @@ def __init__(
description,
)
+ @classmethod
+ def restore_from_api_destination_and_connection(
+ cls, api_destination: ApiDestination, connection: Connection
+ ):
+ api_destination_service = cls(
+ name=api_destination.name,
+ region=api_destination.region,
+ account_id=api_destination.account_id,
+ connection_arn=api_destination.connection_arn,
+ connection=connection,
+ invocation_endpoint=api_destination.invocation_endpoint,
+ http_method=api_destination.http_method,
+ invocation_rate_limit_per_second=api_destination.invocation_rate_limit_per_second,
+ )
+ api_destination_service.api_destination = api_destination
+ return api_destination_service
+
@property
def arn(self) -> Arn:
return self.api_destination.arn
diff --git a/localstack-core/localstack/services/events/connection.py b/localstack-core/localstack/services/events/connection.py
index bb855c9203e0c..c2b72a2025328 100644
--- a/localstack-core/localstack/services/events/connection.py
+++ b/localstack-core/localstack/services/events/connection.py
@@ -32,12 +32,16 @@ def __init__(
auth_parameters: CreateConnectionAuthRequestParameters,
description: ConnectionDescription | None = None,
invocation_connectivity_parameters: ConnectivityResourceParameters | None = None,
+ create_secret: bool = True,
):
self._validate_input(name, authorization_type)
state = self._get_initial_state(authorization_type)
- secret_arn = self.create_connection_secret(
- region, account_id, name, authorization_type, auth_parameters
- )
+
+ secret_arn = None
+ if create_secret:
+ secret_arn = self.create_connection_secret(
+ region, account_id, name, authorization_type, auth_parameters
+ )
public_auth_parameters = self._get_public_parameters(authorization_type, auth_parameters)
self.connection = Connection(
@@ -52,6 +56,19 @@ def __init__(
invocation_connectivity_parameters,
)
+ @classmethod
+ def restore_from_connection(cls, connection: Connection):
+ connection_service = cls(
+ connection.name,
+ connection.region,
+ connection.account_id,
+ connection.authorization_type,
+ connection.auth_parameters,
+ create_secret=False,
+ )
+ connection_service.connection = connection
+ return connection_service
+
@property
def arn(self) -> Arn:
return self.connection.arn
diff --git a/localstack-core/localstack/services/events/provider.py b/localstack-core/localstack/services/events/provider.py
index a51fd805288e5..67e5e1bd9763e 100644
--- a/localstack-core/localstack/services/events/provider.py
+++ b/localstack-core/localstack/services/events/provider.py
@@ -44,6 +44,7 @@
DescribeReplayResponse,
DescribeRuleResponse,
EndpointId,
+ EventBusArn,
EventBusDescription,
EventBusList,
EventBusName,
@@ -170,6 +171,7 @@
from localstack.utils.event_matcher import matches_event
from localstack.utils.strings import long_uid
from localstack.utils.time import TIMESTAMP_FORMAT_TZ, timestamp
+from localstack.utils.xray.trace_header import TraceHeader
from .analytics import InvocationStatus, rule_invocation
@@ -394,8 +396,10 @@ def create_connection(
auth_parameters: CreateConnectionAuthRequestParameters,
description: ConnectionDescription = None,
invocation_connectivity_parameters: ConnectivityResourceParameters = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> CreateConnectionResponse:
+ # TODO add support for kms_key_identifier
region = context.region
account_id = context.account_id
store = self.get_store(region, account_id)
@@ -488,8 +492,10 @@ def update_connection(
authorization_type: ConnectionAuthorizationType = None,
auth_parameters: UpdateConnectionAuthRequestParameters = None,
invocation_connectivity_parameters: ConnectivityResourceParameters = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> UpdateConnectionResponse:
+ # TODO add support for kms_key_identifier
region = context.region
account_id = context.account_id
store = self.get_store(region, account_id)
@@ -921,12 +927,14 @@ def create_archive(
self,
context: RequestContext,
archive_name: ArchiveName,
- event_source_arn: Arn,
+ event_source_arn: EventBusArn,
description: ArchiveDescription = None,
event_pattern: EventPattern = None,
retention_days: RetentionDays = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> CreateArchiveResponse:
+ # TODO add support for kms_key_identifier
region = context.region
account_id = context.account_id
store = self.get_store(region, account_id)
@@ -1022,8 +1030,10 @@ def update_archive(
description: ArchiveDescription = None,
event_pattern: EventPattern = None,
retention_days: RetentionDays = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> UpdateArchiveResponse:
+ # TODO add support for kms_key_identifier
region = context.region
account_id = context.account_id
store = self.get_store(region, account_id)
@@ -1536,8 +1546,11 @@ def func(*args, **kwargs):
}
target_unique_id = f"{rule.arn}-{target['Id']}"
target_sender = self._target_sender_store[target_unique_id]
+ new_trace_header = (
+ TraceHeader().ensure_root_exists()
+ ) # scheduled events will always start a new trace
try:
- target_sender.process_event(event.copy())
+ target_sender.process_event(event.copy(), trace_header=new_trace_header)
except Exception as e:
LOG.info(
"Unable to send event notification %s to target %s: %s",
@@ -1809,6 +1822,8 @@ def _process_entry(
return
region, account_id = extract_region_and_account_id(event_bus_name_or_arn, context)
+
+ # TODO check interference with x-ray trace header
if encoded_trace_header := get_trace_header_encoded_region_account(
entry, context.region, context.account_id, region, account_id
):
@@ -1832,14 +1847,16 @@ def _process_entry(
)
return
- self._proxy_capture_input_event(event_formatted)
+ trace_header = context.trace_context["aws_trace_header"]
+
+ self._proxy_capture_input_event(event_formatted, trace_header, region, account_id)
# Always add the successful EventId entry, even if target processing might fail
processed_entries.append({"EventId": event_formatted["id"]})
if configured_rules := list(event_bus.rules.values()):
for rule in configured_rules:
- self._process_rules(rule, region, account_id, event_formatted)
+ self._process_rules(rule, region, account_id, event_formatted, trace_header)
else:
LOG.info(
json.dumps(
@@ -1850,8 +1867,10 @@ def _process_entry(
)
)
- def _proxy_capture_input_event(self, event: FormattedEvent) -> None:
- # only required for eventstudio to capture input event if no rule is configured
+ def _proxy_capture_input_event(
+ self, event: FormattedEvent, trace_header: TraceHeader, region: str, account_id: str
+ ) -> None:
+ # only required for EventStudio to capture input event if no rule is configured
pass
def _process_rules(
@@ -1860,6 +1879,7 @@ def _process_rules(
region: str,
account_id: str,
event_formatted: FormattedEvent,
+ trace_header: TraceHeader,
) -> None:
"""Process rules for an event. Note that we no longer handle entries here as AWS returns success regardless of target failures."""
event_pattern = rule.event_pattern
@@ -1889,7 +1909,7 @@ def _process_rules(
target_unique_id = f"{rule.arn}-{target_id}"
target_sender = self._target_sender_store[target_unique_id]
try:
- target_sender.process_event(event_formatted.copy())
+ target_sender.process_event(event_formatted.copy(), trace_header)
rule_invocation.labels(
status=InvocationStatus.success,
service=target_sender.service,
diff --git a/localstack-core/localstack/services/events/target.py b/localstack-core/localstack/services/events/target.py
index b12691f28925e..fe18ce999412c 100644
--- a/localstack-core/localstack/services/events/target.py
+++ b/localstack-core/localstack/services/events/target.py
@@ -47,6 +47,7 @@
from localstack.utils.json import extract_jsonpath
from localstack.utils.strings import to_bytes
from localstack.utils.time import now_utc
+from localstack.utils.xray.trace_header import TraceHeader
LOG = logging.getLogger(__name__)
@@ -63,6 +64,7 @@
)
TRANSFORMER_PLACEHOLDER_PATTERN = re.compile(r"<(.*?)>")
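+# HTTP header used by AWS X-Ray to propagate the trace context to downstream targets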
+TRACE_HEADER_KEY = "X-Amzn-Trace-Id"
def transform_event_with_target_input_path(
@@ -193,10 +195,10 @@ def client(self):
return self._client
@abstractmethod
- def send_event(self, event: FormattedEvent | TransformedEvent):
+ def send_event(self, event: FormattedEvent | TransformedEvent, trace_header: TraceHeader):
pass
- def process_event(self, event: FormattedEvent):
+ def process_event(self, event: FormattedEvent, trace_header: TraceHeader):
"""Processes the event and send it to the target."""
if input_ := self.target.get("Input"):
event = json.loads(input_)
@@ -208,7 +210,7 @@ def process_event(self, event: FormattedEvent):
if input_transformer := self.target.get("InputTransformer"):
event = self.transform_event_with_target_input_transformer(input_transformer, event)
if event:
- self.send_event(event)
+ self.send_event(event, trace_header)
else:
LOG.info("No event to send to target %s", self.target.get("Id"))
@@ -257,6 +259,7 @@ def _initialize_client(self) -> BaseClient:
client = client.request_metadata(
service_principal=service_principal, source_arn=self.rule_arn
)
+ self._register_client_hooks(client)
return client
def _validate_input_transformer(self, input_transformer: InputTransformer):
@@ -287,6 +290,24 @@ def _get_predefined_template_replacements(self, event: FormattedEvent) -> dict[s
return predefined_template_replacements
+ def _register_client_hooks(self, client: BaseClient):
+ """Register client hooks to inject trace header into requests."""
+
+ def handle_extract_params(params, context, **kwargs):
+ trace_header = params.pop("TraceHeader", None)
+ if trace_header is None:
+ return
+ context[TRACE_HEADER_KEY] = trace_header.to_header_str()
+
+ def handle_inject_headers(params, context, **kwargs):
+ if trace_header_str := context.pop(TRACE_HEADER_KEY, None):
+ params["headers"][TRACE_HEADER_KEY] = trace_header_str
+
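+ # "provide-client-params" fires before botocore validates the request parameters, so the
+ # non-modeled TraceHeader argument can be removed there; "before-call" fires just before
+ # the HTTP request is sent, which is where the header is injected.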
+ client.meta.events.register(
+ f"provide-client-params.{self.service}.*", handle_extract_params
+ )
+ client.meta.events.register(f"before-call.{self.service}.*", handle_inject_headers)
+
TargetSenderDict = dict[str, TargetSender] # rule_arn-target_id as global unique id
@@ -316,7 +337,7 @@ class ApiGatewayTargetSender(TargetSender):
ALLOWED_HTTP_METHODS = {"GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"}
- def send_event(self, event):
+ def send_event(self, event, trace_header):
# Parse the ARN to extract api_id, stage_name, http_method, and resource path
# Example ARN: arn:{partition}:execute-api:{region}:{account_id}:{api_id}/{stage_name}/{method}/{resource_path}
arn_parts = parse_arn(self.target["Arn"])
@@ -383,6 +404,9 @@ def send_event(self, event):
# Serialize the event, converting datetime objects to strings
event_json = json.dumps(event, default=str)
+ # Add trace header
+ headers[TRACE_HEADER_KEY] = trace_header.to_header_str()
+
# Send the HTTP request
response = requests.request(
method=http_method, url=url, headers=headers, data=event_json, timeout=5
@@ -415,12 +439,12 @@ def _get_predefined_template_replacements(self, event: Dict[str, Any]) -> Dict[s
class AppSyncTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
raise NotImplementedError("AppSync target is not yet implemented")
class BatchTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
raise NotImplementedError("Batch target is not yet implemented")
def _validate_input(self, target: Target):
@@ -433,7 +457,7 @@ def _validate_input(self, target: Target):
class ECSTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
raise NotImplementedError("ECS target is a pro feature, please use LocalStack Pro")
def _validate_input(self, target: Target):
@@ -444,7 +468,7 @@ def _validate_input(self, target: Target):
class EventsTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
# TODO add validation and tests for eventbridge to eventbridge requires Detail, DetailType, and Source
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/events/client/put_events.html
source = self._get_source(event)
@@ -464,7 +488,8 @@ def send_event(self, event):
event, self.region, self.account_id, self.target_region, self.target_account_id
):
entries[0]["TraceHeader"] = encoded_original_id
- self.client.put_events(Entries=entries)
+
+ self.client.put_events(Entries=entries, TraceHeader=trace_header)
def _get_source(self, event: FormattedEvent | TransformedEvent) -> str:
if isinstance(event, dict) and (source := event.get("source")):
@@ -486,7 +511,7 @@ def _get_resources(self, event: FormattedEvent | TransformedEvent) -> list[str]:
class EventsApiDestinationTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
"""Send an event to an EventBridge API destination
See https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-destinations.html"""
target_arn = self.target["Arn"]
@@ -520,6 +545,9 @@ def send_event(self, event):
if http_parameters := self.target.get("HttpParameters"):
endpoint = add_target_http_parameters(http_parameters, endpoint, headers, event)
+ # add trace header
+ headers[TRACE_HEADER_KEY] = trace_header.to_header_str()
+
result = requests.request(
method=method, url=endpoint, data=json.dumps(event or {}), headers=headers
)
@@ -532,8 +560,9 @@ def send_event(self, event):
class FirehoseTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
delivery_stream_name = firehose_name(self.target["Arn"])
+
self.client.put_record(
DeliveryStreamName=delivery_stream_name,
Record={"Data": to_bytes(to_json_str(event))},
@@ -541,7 +570,7 @@ def send_event(self, event):
class KinesisTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
partition_key_path = collections.get_safe(
self.target,
"$.KinesisParameters.PartitionKeyPath",
@@ -549,6 +578,7 @@ def send_event(self, event):
)
stream_name = self.target["Arn"].split("/")[-1]
partition_key = collections.get_safe(event, partition_key_path, event["id"])
+
self.client.put_record(
StreamName=stream_name,
Data=to_bytes(to_json_str(event)),
@@ -565,18 +595,20 @@ def _validate_input(self, target: Target):
class LambdaTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
self.client.invoke(
FunctionName=self.target["Arn"],
Payload=to_bytes(to_json_str(event)),
InvocationType="Event",
+ TraceHeader=trace_header,
)
class LogsTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
log_group_name = self.target["Arn"].split(":")[6]
log_stream_name = str(uuid.uuid4()) # Unique log stream name
+
self.client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
self.client.put_log_events(
logGroupName=log_group_name,
@@ -591,7 +623,7 @@ def send_event(self, event):
class RedshiftTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
raise NotImplementedError("Redshift target is not yet implemented")
def _validate_input(self, target: Target):
@@ -602,20 +634,21 @@ def _validate_input(self, target: Target):
class SagemakerTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
raise NotImplementedError("Sagemaker target is not yet implemented")
class SnsTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
self.client.publish(TopicArn=self.target["Arn"], Message=to_json_str(event))
class SqsTargetSender(TargetSender):
- def send_event(self, event):
+ def send_event(self, event, trace_header):
queue_url = sqs_queue_url_for_arn(self.target["Arn"])
msg_group_id = self.target.get("SqsParameters", {}).get("MessageGroupId", None)
kwargs = {"MessageGroupId": msg_group_id} if msg_group_id else {}
+
self.client.send_message(
QueueUrl=queue_url,
MessageBody=to_json_str(event),
@@ -626,8 +659,9 @@ def send_event(self, event):
class StatesTargetSender(TargetSender):
"""Step Functions Target Sender"""
- def send_event(self, event):
+ def send_event(self, event, trace_header):
self.service = "stepfunctions"
+
self.client.start_execution(
stateMachineArn=self.target["Arn"], name=event["id"], input=to_json_str(event)
)
@@ -642,7 +676,7 @@ def _validate_input(self, target: Target):
class SystemsManagerSender(TargetSender):
"""EC2 Run Command Target Sender"""
- def send_event(self, event):
+ def send_event(self, event, trace_header):
raise NotImplementedError("Systems Manager target is not yet implemented")
def _validate_input(self, target: Target):
diff --git a/localstack-core/localstack/services/events/utils.py b/localstack-core/localstack/services/events/utils.py
index 36258ac668acb..5ac8e835b136f 100644
--- a/localstack-core/localstack/services/events/utils.py
+++ b/localstack-core/localstack/services/events/utils.py
@@ -187,6 +187,7 @@ def format_event(
event: PutEventsRequestEntry, region: str, account_id: str, event_bus_name: EventBusName
) -> FormattedEvent:
# See https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-events.html
+ # region_name and account_id of the original event are preserved for cross-region event bus communication
trace_header = event.get("TraceHeader")
message = {}
if trace_header:
diff --git a/localstack-core/localstack/services/events/v1/provider.py b/localstack-core/localstack/services/events/v1/provider.py
index bbcd4e0ac33eb..9e3da8e447f6a 100644
--- a/localstack-core/localstack/services/events/v1/provider.py
+++ b/localstack-core/localstack/services/events/v1/provider.py
@@ -25,6 +25,7 @@
EventBusNameOrArn,
EventPattern,
EventsApi,
+ KmsKeyIdentifier,
PutRuleResponse,
PutTargetsResponse,
RoleArn,
@@ -296,8 +297,10 @@ def create_connection(
auth_parameters: CreateConnectionAuthRequestParameters,
description: ConnectionDescription = None,
invocation_connectivity_parameters: ConnectivityResourceParameters = None,
+ kms_key_identifier: KmsKeyIdentifier = None,
**kwargs,
) -> CreateConnectionResponse:
+ # TODO add support for kms_key_identifier
errors = []
if not CONNECTION_NAME_PATTERN.match(name):
diff --git a/localstack-core/localstack/services/iam/provider.py b/localstack-core/localstack/services/iam/provider.py
index 26da2c8adbe21..312a2a714aafc 100644
--- a/localstack-core/localstack/services/iam/provider.py
+++ b/localstack-core/localstack/services/iam/provider.py
@@ -107,6 +107,55 @@ def get_iam_backend(context: RequestContext) -> IAMBackend:
return iam_backends[context.account_id][context.partition]
+def get_policies_from_principal(backend: IAMBackend, principal_arn: str) -> list[dict]:
+ policies = []
+ if ":role" in principal_arn:
+ role_name = principal_arn.split("/")[-1]
+
+ policies.append(backend.get_role(role_name=role_name).assume_role_policy_document)
+
+ policy_names = backend.list_role_policies(role_name=role_name)
+ policies.extend(
+ [
+ backend.get_role_policy(role_name=role_name, policy_name=policy_name)[1]
+ for policy_name in policy_names
+ ]
+ )
+
+ attached_policies, _ = backend.list_attached_role_policies(role_name=role_name)
+ policies.extend([policy.document for policy in attached_policies])
+
+ if ":group" in principal_arn:
+ group_name = principal_arn.split("/")[-1]
+ policy_names = backend.list_group_policies(group_name=group_name)
+ policies.extend(
+ [
+ backend.get_group_policy(group_name=group_name, policy_name=policy_name)[1]
+ for policy_name in policy_names
+ ]
+ )
+
+ attached_policies, _ = backend.list_attached_group_policies(group_name=group_name)
+ policies.extend([policy.document for policy in attached_policies])
+
+ if ":user" in principal_arn:
+ user_name = principal_arn.split("/")[-1]
+ policy_names = backend.list_user_policies(user_name=user_name)
+ policies.extend(
+ [
+ backend.get_user_policy(user_name=user_name, policy_name=policy_name)[1]
+ for policy_name in policy_names
+ ]
+ )
+
+ attached_policies, _ = backend.list_attached_user_policies(user_name=user_name)
+ policies.extend([policy.document for policy in attached_policies])
+
+ return policies
+
+
class IamProvider(IamApi):
def __init__(self):
apply_iam_patches()
@@ -168,12 +217,20 @@ def simulate_principal_policy(
**kwargs,
) -> SimulatePolicyResponse:
backend = get_iam_backend(context)
- policy = backend.get_policy(policy_source_arn)
- policy_version = backend.get_policy_version(policy_source_arn, policy.default_version_id)
- try:
- policy_statements = json.loads(policy_version.document).get("Statement", [])
- except Exception:
- raise NoSuchEntityException("Policy not found")
+
+ policies = get_policies_from_principal(backend, policy_source_arn)
+
+ def _get_statements_from_policy_list(policies: list[str]):
+ statements = []
+ for policy_str in policies:
+ policy_dict = json.loads(policy_str)
+ if isinstance(policy_dict["Statement"], list):
+ statements.extend(policy_dict["Statement"])
+ else:
+ statements.append(policy_dict["Statement"])
+ return statements
+
+ policy_statements = _get_statements_from_policy_list(policies)
evaluations = [
self.build_evaluation_result(action_name, resource_arn, policy_statements)
diff --git a/localstack-core/localstack/services/kinesis/packages.py b/localstack-core/localstack/services/kinesis/packages.py
index 53ef6b7c53610..d6b68dcd9d628 100644
--- a/localstack-core/localstack/services/kinesis/packages.py
+++ b/localstack-core/localstack/services/kinesis/packages.py
@@ -2,18 +2,18 @@
from functools import lru_cache
from typing import List
-from localstack.packages import Package, PackageInstaller
+from localstack.packages import Package
from localstack.packages.core import NodePackageInstaller
_KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.4.9"
-class KinesisMockPackage(Package):
+class KinesisMockPackage(Package[NodePackageInstaller]):
def __init__(self, default_version: str = _KINESIS_MOCK_VERSION):
super().__init__(name="Kinesis Mock", default_version=default_version)
@lru_cache
- def _get_installer(self, version: str) -> PackageInstaller:
+ def _get_installer(self, version: str) -> NodePackageInstaller:
return KinesisMockPackageInstaller(version)
def get_versions(self) -> List[str]:
diff --git a/localstack-core/localstack/services/kms/models.py b/localstack-core/localstack/services/kms/models.py
index fc96e9b4000e6..3479e309d4903 100644
--- a/localstack-core/localstack/services/kms/models.py
+++ b/localstack-core/localstack/services/kms/models.py
@@ -12,7 +12,7 @@
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
-from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm
+from cryptography.exceptions import InvalidSignature, InvalidTag, UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives import serialization as crypto_serialization
@@ -29,6 +29,7 @@
CreateGrantRequest,
CreateKeyRequest,
EncryptionContextType,
+ InvalidCiphertextException,
InvalidKeyUsageException,
KeyMetadata,
KeySpec,
@@ -36,6 +37,7 @@
KeyUsageType,
KMSInvalidMacException,
KMSInvalidSignatureException,
+ LimitExceededException,
MacAlgorithmSpec,
MessageType,
MultiRegionConfiguration,
@@ -84,6 +86,7 @@
"HMAC_512": (64, 128),
}
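+# AWS allows at most 10 on-demand rotations per KMS key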
+ON_DEMAND_ROTATION_LIMIT = 10
KEY_ID_LEN = 36
# Moto uses IV_LEN of 12, as it is fine for GCM encryption mode, but we use CBC, so have to set it to 16.
IV_LEN = 16
@@ -175,6 +178,45 @@ class KmsCryptoKey:
key_material: bytes
key_spec: str
+ @staticmethod
+ def assert_valid(key_spec: str):
+ """
+ Validates that the given ``key_spec`` is supported in the current context.
+
+ :param key_spec: The key specification to validate.
+ :type key_spec: str
+ :raises ValidationException: If ``key_spec`` is not a known valid spec.
+ :raises UnsupportedOperationException: If ``key_spec`` is entirely unsupported.
+ """
+
+ def raise_validation():
+ raise ValidationException(
+ f"1 validation error detected: Value '{key_spec}' at 'keySpec' "
+ f"failed to satisfy constraint: Member must satisfy enum value set: "
+ f"[RSA_2048, ECC_NIST_P384, ECC_NIST_P256, ECC_NIST_P521, HMAC_384, RSA_3072, "
+ f"ECC_SECG_P256K1, RSA_4096, SYMMETRIC_DEFAULT, HMAC_256, HMAC_224, HMAC_512]"
+ )
+
+ if key_spec == "SYMMETRIC_DEFAULT":
+ return
+
+ if key_spec.startswith("RSA"):
+ if key_spec not in RSA_CRYPTO_KEY_LENGTHS:
+ raise_validation()
+ return
+
+ if key_spec.startswith("ECC"):
+ if key_spec not in ECC_CURVES:
+ raise_validation()
+ return
+
+ if key_spec.startswith("HMAC"):
+ if key_spec not in HMAC_RANGE_KEY_LENGTHS:
+ raise_validation()
+ return
+
+ raise UnsupportedOperationException(f"KeySpec {key_spec} is not supported")
+
def __init__(self, key_spec: str, key_material: Optional[bytes] = None):
self.private_key = None
self.public_key = None
@@ -185,6 +227,8 @@ def __init__(self, key_spec: str, key_material: Optional[bytes] = None):
self.key_material = key_material or os.urandom(SYMMETRIC_DEFAULT_MATERIAL_LENGTH)
self.key_spec = key_spec
+ KmsCryptoKey.assert_valid(key_spec)
+
if key_spec == "SYMMETRIC_DEFAULT":
return
@@ -193,24 +237,16 @@ def __init__(self, key_spec: str, key_material: Optional[bytes] = None):
key = rsa.generate_private_key(public_exponent=65537, key_size=key_size)
elif key_spec.startswith("ECC"):
curve = ECC_CURVES.get(key_spec)
- key = ec.generate_private_key(curve)
+ if key_material:
+ key = crypto_serialization.load_der_private_key(key_material, password=None)
+ else:
+ key = ec.generate_private_key(curve)
elif key_spec.startswith("HMAC"):
- if key_spec not in HMAC_RANGE_KEY_LENGTHS:
- raise ValidationException(
- f"1 validation error detected: Value '{key_spec}' at 'keySpec' "
- f"failed to satisfy constraint: Member must satisfy enum value set: "
- f"[RSA_2048, ECC_NIST_P384, ECC_NIST_P256, ECC_NIST_P521, HMAC_384, RSA_3072, "
- f"ECC_SECG_P256K1, RSA_4096, SYMMETRIC_DEFAULT, HMAC_256, HMAC_224, HMAC_512]"
- )
minimum_length, maximum_length = HMAC_RANGE_KEY_LENGTHS.get(key_spec)
self.key_material = key_material or os.urandom(
random.randint(minimum_length, maximum_length)
)
return
- else:
- # We do not support SM2 - asymmetric keys both suitable for ENCRYPT_DECRYPT and SIGN_VERIFY,
- # but only used in China AWS regions.
- raise UnsupportedOperationException(f"KeySpec {key_spec} is not supported")
self._serialize_key(key)
@@ -249,6 +285,7 @@ class KmsKey:
is_key_rotation_enabled: bool
rotation_period_in_days: int
next_rotation_date: datetime.datetime
+ previous_keys: list[str]
def __init__(
self,
@@ -257,6 +294,7 @@ def __init__(
region: str = None,
):
create_key_request = create_key_request or CreateKeyRequest()
+ self.previous_keys = []
# Please keep in mind that tags of a key could be present in the request, they are not a part of metadata. At
# least in the sense of DescribeKey not returning them with the rest of the metadata. Instead, tags are more
@@ -319,9 +357,15 @@ def decrypt(
self, ciphertext: Ciphertext, encryption_context: EncryptionContextType = None
) -> bytes:
aad = _serialize_encryption_context(encryption_context=encryption_context)
- return decrypt(
- self.crypto_key.key_material, ciphertext.ciphertext, ciphertext.iv, ciphertext.tag, aad
- )
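+ # On-demand rotation keeps the replaced key material in previous_keys, so try the
+ # current key first and fall back to older material for ciphertexts created before
+ # a rotation.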
+ keys_to_try = [self.crypto_key.key_material] + self.previous_keys
+
+ for key in keys_to_try:
+ try:
+ return decrypt(key, ciphertext.ciphertext, ciphertext.iv, ciphertext.tag, aad)
+ except (InvalidTag, InvalidSignature):
+ continue
+
+ raise InvalidCiphertextException()
def decrypt_rsa(self, encrypted: bytes) -> bytes:
private_key = crypto_serialization.load_der_private_key(
@@ -476,7 +520,7 @@ def _construct_sign_verify_padding(
if "PKCS" in signing_algorithm:
return padding.PKCS1v15()
elif "PSS" in signing_algorithm:
- return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.MAX_LENGTH)
+ return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.DIGEST_LENGTH)
else:
LOG.warning("Unsupported padding in SigningAlgorithm '%s'", signing_algorithm)
@@ -694,6 +738,12 @@ def _get_key_usage(self, request_key_usage: str, key_spec: str) -> str:
return request_key_usage or "ENCRYPT_DECRYPT"
def rotate_key_on_demand(self):
+ if len(self.previous_keys) >= ON_DEMAND_ROTATION_LIMIT:
+ raise LimitExceededException(
+ f"The on-demand rotations limit has been reached for the given keyId. "
+ f"No more on-demand rotations can be performed for this key: {self.metadata['Arn']}"
+ )
+ self.previous_keys.append(self.crypto_key.key_material)
self.crypto_key = KmsCryptoKey(KeySpec.SYMMETRIC_DEFAULT)
diff --git a/localstack-core/localstack/services/kms/provider.py b/localstack-core/localstack/services/kms/provider.py
index 342715dc0710f..9f29780fa2103 100644
--- a/localstack-core/localstack/services/kms/provider.py
+++ b/localstack-core/localstack/services/kms/provider.py
@@ -123,7 +123,12 @@
deserialize_ciphertext_blob,
kms_stores,
)
-from localstack.services.kms.utils import is_valid_key_arn, parse_key_arn, validate_alias_name
+from localstack.services.kms.utils import (
+ execute_dry_run_capable,
+ is_valid_key_arn,
+ parse_key_arn,
+ validate_alias_name,
+)
from localstack.services.plugins import ServiceLifecycleHook
from localstack.utils.aws.arns import get_partition, kms_alias_arn, parse_arn
from localstack.utils.collections import PaginatedList
@@ -732,11 +737,21 @@ def _generate_data_key_pair(
key_id: str,
key_pair_spec: str,
encryption_context: EncryptionContextType = None,
+ dry_run: NullableBooleanType = None,
):
account_id, region_name, key_id = self._parse_key_id(key_id, context)
key = self._get_kms_key(account_id, region_name, key_id)
self._validate_key_for_encryption_decryption(context, key)
+ KmsCryptoKey.assert_valid(key_pair_spec)
+ return execute_dry_run_capable(
+ self._build_data_key_pair_response, dry_run, key, key_pair_spec, encryption_context
+ )
+
+ def _build_data_key_pair_response(
+ self, key: KmsKey, key_pair_spec: str, encryption_context: EncryptionContextType = None
+ ):
crypto_key = KmsCryptoKey(key_pair_spec)
+
return {
"KeyId": key.metadata["Arn"],
"KeyPairSpec": key_pair_spec,
@@ -757,8 +772,9 @@ def generate_data_key_pair(
dry_run: NullableBooleanType = None,
**kwargs,
) -> GenerateDataKeyPairResponse:
- # TODO add support for "dry_run"
- result = self._generate_data_key_pair(context, key_id, key_pair_spec, encryption_context)
+ result = self._generate_data_key_pair(
+ context, key_id, key_pair_spec, encryption_context, dry_run
+ )
return GenerateDataKeyPairResponse(**result)
@handler("GenerateRandom", expand=False)
@@ -794,8 +810,9 @@ def generate_data_key_pair_without_plaintext(
dry_run: NullableBooleanType = None,
**kwargs,
) -> GenerateDataKeyPairWithoutPlaintextResponse:
- # TODO add support for "dry_run"
- result = self._generate_data_key_pair(context, key_id, key_pair_spec, encryption_context)
+ result = self._generate_data_key_pair(
+ context, key_id, key_pair_spec, encryption_context, dry_run
+ )
result.pop("PrivateKeyPlaintext")
return GenerateDataKeyPairResponse(**result)
@@ -1341,7 +1358,7 @@ def list_resource_tags(
return ListResourceTagsResponse(Tags=page, **kwargs)
@handler("RotateKeyOnDemand", expand=False)
- # TODO: keep trak of key rotations as AWS does and return them in the ListKeyRotations operation
+ # TODO: return the key rotations in the ListKeyRotations operation
def rotate_key_on_demand(
self, context: RequestContext, request: RotateKeyOnDemandRequest
) -> RotateKeyOnDemandResponse:
diff --git a/localstack-core/localstack/services/kms/utils.py b/localstack-core/localstack/services/kms/utils.py
index ce1a65599e6c8..ae9ff4580caa1 100644
--- a/localstack-core/localstack/services/kms/utils.py
+++ b/localstack-core/localstack/services/kms/utils.py
@@ -1,10 +1,12 @@
import re
-from typing import Tuple
+from typing import Callable, Tuple, TypeVar
-from localstack.aws.api.kms import Tag, TagException
+from localstack.aws.api.kms import DryRunOperationException, Tag, TagException
from localstack.services.kms.exceptions import ValidationException
from localstack.utils.aws.arns import ARN_PARTITION_REGEX
+T = TypeVar("T")
+
KMS_KEY_ARN_PATTERN = re.compile(
rf"{ARN_PARTITION_REGEX}:kms:(?P[^:]+):(?P\d{{12}}):key\/(?P[^:]+)$"
)
@@ -58,3 +60,28 @@ def validate_tag(tag_position: int, tag: Tag) -> None:
if tag_key.lower().startswith("aws:"):
raise TagException("Tags beginning with aws: are reserved")
+
+
+def execute_dry_run_capable(func: Callable[..., T], dry_run: bool, *args, **kwargs) -> T:
+ """
+ Executes a function unless dry run mode is enabled.
+
+ If ``dry_run`` is ``True``, the function is not executed and a
+ ``DryRunOperationException`` is raised. Otherwise, the provided
+ function is called with the given positional and keyword arguments.
+
+ :param func: The function to be executed.
+ :type func: Callable[..., T]
+ :param dry_run: Flag indicating whether the execution is a dry run.
+ :type dry_run: bool
+ :param args: Positional arguments to pass to the function.
+ :param kwargs: Keyword arguments to pass to the function.
+ :returns: The result of the function call if ``dry_run`` is ``False``.
+ :rtype: T
+ :raises DryRunOperationException: If ``dry_run`` is ``True``.
+ """
+ if dry_run:
+ raise DryRunOperationException(
+ "The request would have succeeded, but the DryRun option is set."
+ )
+ return func(*args, **kwargs)
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py
index 4712f5a4fd3f9..b2e85a04ea26c 100644
--- a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py
@@ -159,6 +159,8 @@ def generate_event_failure_context(self, abort_condition: str, **kwargs) -> dict
if not error_payload:
return {}
# TODO: Should 'requestContext' and 'responseContext' be defined as models?
+ # TODO: Allow for generating a failure context where there is no responseContext,
+ # i.e. if a RecordAgeExceeded condition is triggered.
context = {
"requestContext": {
"requestId": error_payload.get("requestId"),
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py
index 38fdaafc2b537..0bf30dfb15d79 100644
--- a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py
@@ -172,6 +172,7 @@ def get_esm_worker(self) -> EsmWorker:
"MaximumBatchingWindowInSeconds"
],
MaximumRetryAttempts=self.esm_config["MaximumRetryAttempts"],
+ MaximumRecordAgeInSeconds=self.esm_config["MaximumRecordAgeInSeconds"],
**optional_params,
),
)
@@ -203,6 +204,7 @@ def get_esm_worker(self) -> EsmWorker:
"MaximumBatchingWindowInSeconds"
],
MaximumRetryAttempts=self.esm_config["MaximumRetryAttempts"],
+ MaximumRecordAgeInSeconds=self.esm_config["MaximumRecordAgeInSeconds"],
**optional_params,
),
)
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py
index d69d26baeb87a..2a8e793945c42 100644
--- a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py
@@ -61,6 +61,8 @@ def initialize_shards(self):
**kwargs,
)
shards[shard_id] = get_shard_iterator_response["ShardIterator"]
+
+ LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
return shards
def stream_arn_param(self) -> dict:
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py
index aae917e84db2a..e2dc19b74b012 100644
--- a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py
@@ -84,6 +84,8 @@ def initialize_shards(self) -> dict[str, str]:
**kwargs,
)
shards[shard_id] = get_shard_iterator_response["ShardIterator"]
+
+ LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
return shards
def stream_arn_param(self) -> dict:
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py
index fd00119dbb08e..d39805dce9113 100644
--- a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py
@@ -1,3 +1,4 @@
+import functools
import json
import logging
from collections import defaultdict
@@ -7,7 +8,7 @@
from localstack.aws.api.pipes import PipeSourceSqsQueueParameters
from localstack.aws.api.sqs import MessageSystemAttributeName
-from localstack.config import internal_service_url
+from localstack.aws.connect import connect_to
from localstack.services.lambda_.event_source_mapping.event_processor import (
EventProcessor,
PartialBatchFailureError,
@@ -315,16 +316,19 @@ def transform_into_events(messages: list[dict]) -> list[dict]:
return events
+@functools.cache
def get_queue_url(queue_arn: str) -> str:
- # TODO: consolidate this method with localstack.services.sqs.models.SqsQueue.url
- # * Do we need to support different endpoint strategies?
- # * If so, how can we achieve this without having a request context
- host_url = internal_service_url()
- host = host_url.rstrip("/")
parsed_arn = parse_arn(queue_arn)
+
+ queue_name = parsed_arn["resource"]
account_id = parsed_arn["account"]
- name = parsed_arn["resource"]
- return f"{host}/{account_id}/{name}"
+ region = parsed_arn["region"]
+
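+ # Resolving the URL through the SQS API (instead of hand-building it) respects the
+ # configured endpoint strategy; @functools.cache above avoids a lookup on every poll.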
+ sqs_client = connect_to(region_name=region).sqs
+ queue_url = sqs_client.get_queue_url(QueueName=queue_name, QueueOwnerAWSAccountId=account_id)[
+ "QueueUrl"
+ ]
+ return queue_url
def message_attributes_to_lower(message_attrs):
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py
index 158f108592a78..72d7c3ef3523b 100644
--- a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py
@@ -2,6 +2,7 @@
import logging
import threading
from abc import abstractmethod
+from bisect import bisect_left
from collections import defaultdict
from datetime import datetime
from typing import Iterator
@@ -154,7 +155,6 @@ def poll_events(self):
LOG.debug("No shards found for %s.", self.source_arn)
raise EmptyPollResultsException(service=self.event_source(), source_arn=self.source_arn)
else:
- LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
# Remove all shard batchers without corresponding shards
for shard_id in self.shard_batcher.keys() - self.shards.keys():
self.shard_batcher.pop(shard_id, None)
@@ -185,7 +185,10 @@ def poll_events(self):
def poll_events_from_shard(self, shard_id: str, shard_iterator: str):
get_records_response = self.get_records(shard_iterator)
records: list[dict] = get_records_response.get("Records", [])
- next_shard_iterator = get_records_response["NextShardIterator"]
+ if not (next_shard_iterator := get_records_response.get("NextShardIterator")):
+ # If the next shard iterator is None, we can assume the shard is closed or
+ # has expired on the DynamoDB Local server, hence we should re-initialize.
+ self.shards = self.initialize_shards()
# We cannot reliably back-off when no records found since an iterator
# may have to move multiple times until records are returned.
@@ -207,16 +210,7 @@ def poll_events_from_shard(self, shard_id: str, shard_iterator: str):
def forward_events_to_target(self, shard_id, next_shard_iterator, records):
polled_events = self.transform_into_events(records, shard_id)
-
abort_condition = None
- # Check MaximumRecordAgeInSeconds
- if maximum_record_age_in_seconds := self.stream_parameters.get("MaximumRecordAgeInSeconds"):
- arrival_timestamp_of_last_event = polled_events[-1]["approximateArrivalTimestamp"]
- now = get_current_time().timestamp()
- record_age_in_seconds = now - arrival_timestamp_of_last_event
- if record_age_in_seconds > maximum_record_age_in_seconds:
- abort_condition = "RecordAgeExpired"
-
# TODO: implement format detection behavior (e.g., for JSON body):
# https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html
# Check whether we need poller-specific filter-preprocessing here without modifying the actual event!
@@ -241,23 +235,32 @@ def forward_events_to_target(self, shard_id, next_shard_iterator, records):
return
events = self.add_source_metadata(matching_events_post_filter)
LOG.debug("Polled %d events from %s in shard %s", len(events), self.source_arn, shard_id)
- # TODO: A retry should probably re-trigger fetching the record from the stream again?!
# -> This could be tested by setting a high retry number, using a long pipe execution, and a relatively
# short record expiration age at the source. Check what happens if the record expires at the source.
# A potential implementation could use checkpointing based on the iterator position (within shard scope)
# TODO: handle partial batch failure (see poller.py:parse_batch_item_failures)
# TODO: think about how to avoid starvation of other shards if one shard runs into infinite retries
attempts = 0
+ discarded_events_for_dlq = []
error_payload = {}
max_retries = self.stream_parameters.get("MaximumRetryAttempts", -1)
+ max_record_age = max(
+ self.stream_parameters.get("MaximumRecordAgeInSeconds", -1), 0
+ ) # a value of -1 disables the age check (clamped to 0, which is falsy)
# NOTE: max_retries == 0 means exponential backoff is disabled
boff = ExponentialBackoff(max_retries=max_retries)
- while (
- not abort_condition
- and not self.max_retries_exceeded(attempts)
- and not self._is_shutdown.is_set()
- ):
+ while not abort_condition and events and not self._is_shutdown.is_set():
+ if self.max_retries_exceeded(attempts):
+ abort_condition = "RetryAttemptsExhausted"
+ break
+
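+ # Before each attempt, split off records older than MaximumRecordAgeInSeconds;
+ # expired records are collected for the DLQ while the remaining ones keep being retried.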
+ if max_record_age:
+ events, expired_events = self.bisect_events_by_record_age(max_record_age, events)
+ if expired_events:
+ discarded_events_for_dlq.extend(expired_events)
+ continue
+
try:
if attempts > 0:
# TODO: Should we always backoff (with jitter) before processing since we may not want multiple pollers
@@ -267,10 +270,8 @@ def forward_events_to_target(self, shard_id, next_shard_iterator, records):
self.processor.process_events_batch(events)
boff.reset()
-
- # Update shard iterator if execution is successful
- self.shards[shard_id] = next_shard_iterator
- return
+ # Expired events may still need to be sent on to a DLQ, so on success we break out of the processing loop instead of returning.
+ break
except PartialBatchFailureError as ex:
# TODO: add tests for partial batch failure scenarios
if (
@@ -310,9 +311,8 @@ def forward_events_to_target(self, shard_id, next_shard_iterator, records):
# Discard all successful events and re-process from sequence number of failed event
_, events = self.bisect_events(lowest_sequence_id, events)
- except (BatchFailureError, Exception) as ex:
- if isinstance(ex, BatchFailureError):
- error_payload = ex.error
+ except BatchFailureError as ex:
+ error_payload = ex.error
# FIXME partner_resource_arn is not defined in ESM
LOG.debug(
@@ -320,20 +320,35 @@ def forward_events_to_target(self, shard_id, next_shard_iterator, records):
attempts,
self.partner_resource_arn or self.source_arn,
events,
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
+ )
+ except Exception:
+ # FIXME partner_resource_arn is not defined in ESM
+ LOG.error(
+ "Attempt %d failed with unexpected error while processing %s with events: %s",
+ attempts,
+ self.partner_resource_arn or self.source_arn,
+ events,
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
)
finally:
# Retry polling until the record expires at the source
attempts += 1
+ if discarded_events_for_dlq:
+ abort_condition = "RecordAgeExceeded"
+ error_payload = {}
+ events = discarded_events_for_dlq
+
# Send failed events to potential DLQ
- abort_condition = abort_condition or "RetryAttemptsExhausted"
- failure_context = self.processor.generate_event_failure_context(
- abort_condition=abort_condition,
- error=error_payload,
- attempts_count=attempts,
- partner_resource_arn=self.partner_resource_arn,
- )
- self.send_events_to_dlq(shard_id, events, context=failure_context)
+ if abort_condition:
+ failure_context = self.processor.generate_event_failure_context(
+ abort_condition=abort_condition,
+ error=error_payload,
+ attempts_count=attempts,
+ partner_resource_arn=self.partner_resource_arn,
+ )
+ self.send_events_to_dlq(shard_id, events, context=failure_context)
# Update shard iterator if the execution failed but the events are sent to a DLQ
self.shards[shard_id] = next_shard_iterator
@@ -477,6 +492,17 @@ def bisect_events(
return events, []
+ def bisect_events_by_record_age(
+ self, maximum_record_age: int, events: list[dict]
+ ) -> tuple[list[dict], list[dict]]:
+ """Splits events into [valid_events], [expired_events] based on record age.
+ Where:
+ - Events with age < maximum_record_age are valid.
+ - Events with age >= maximum_record_age are expired."""
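+ # bisect_left assumes events are sorted by arrival time, which holds for records
+ # read in order from a single shard iterator.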
+ cutoff_timestamp = get_current_time().timestamp() - maximum_record_age
+ index = bisect_left(events, cutoff_timestamp, key=self.get_approximate_arrival_time)
+ return events[index:], events[:index]
+
def get_failure_s3_object_key(esm_uuid: str, shard_id: str, failure_datetime: datetime) -> str:
"""
diff --git a/localstack-core/localstack/services/lambda_/invocation/assignment.py b/localstack-core/localstack/services/lambda_/invocation/assignment.py
index 24cebeb7f8320..39f4d04383e26 100644
--- a/localstack-core/localstack/services/lambda_/invocation/assignment.py
+++ b/localstack-core/localstack/services/lambda_/invocation/assignment.py
@@ -86,7 +86,9 @@ def get_environment(
except InvalidStatusException as invalid_e:
LOG.error("InvalidStatusException: %s", invalid_e)
except Exception as e:
- LOG.error("Failed invocation %s", e)
+ LOG.error(
+ "Failed invocation <%s>: %s", type(e), e, exc_info=LOG.isEnabledFor(logging.DEBUG)
+ )
self.stop_environment(execution_environment)
raise e
@@ -107,7 +109,7 @@ def start_environment(
except EnvironmentStartupTimeoutException:
raise
except Exception as e:
- message = f"Could not start new environment: {e}"
+ message = f"Could not start new environment: {type(e).__name__}:{e}"
raise AssignmentException(message) from e
return execution_environment
diff --git a/localstack-core/localstack/services/lambda_/invocation/counting_service.py b/localstack-core/localstack/services/lambda_/invocation/counting_service.py
index 25d2ddf79f689..3c7024288a305 100644
--- a/localstack-core/localstack/services/lambda_/invocation/counting_service.py
+++ b/localstack-core/localstack/services/lambda_/invocation/counting_service.py
@@ -156,6 +156,15 @@ def get_invocation_lease(
provisioned_concurrency_config = function.provisioned_concurrency_configs.get(
function_version.id.qualifier
)
+ if not provisioned_concurrency_config:
+ # check if any aliases point to the current version, and check the provisioned concurrency config
+ # for them. There can be only one config per version, no matter whether it is defined on the alias or the version itself.
+ for alias in function.aliases.values():
+ if alias.function_version == function_version.id.qualifier:
+ provisioned_concurrency_config = (
+ function.provisioned_concurrency_configs.get(alias.name)
+ )
+ break
if provisioned_concurrency_config:
available_provisioned_concurrency = (
provisioned_concurrency_config.provisioned_concurrent_executions
diff --git a/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py b/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py
index ec9e20ef46d33..c67f39addb414 100644
--- a/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py
+++ b/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py
@@ -32,13 +32,13 @@
from localstack.services.lambda_.runtimes import IMAGE_MAPPING
from localstack.utils.container_networking import get_main_container_name
from localstack.utils.container_utils.container_client import (
+ BindMount,
ContainerConfiguration,
DockerNotAvailable,
DockerPlatform,
NoSuchContainer,
NoSuchImage,
PortMappings,
- VolumeBind,
VolumeMappings,
)
from localstack.utils.docker_utils import DOCKER_CLIENT as CONTAINER_CLIENT
@@ -331,7 +331,7 @@ def start(self, env_vars: dict[str, str]) -> None:
if container_config.volumes is None:
container_config.volumes = VolumeMappings()
container_config.volumes.add(
- VolumeBind(
+ BindMount(
str(self.function_version.config.code.get_unzipped_code_location()),
"/var/task",
read_only=True,
diff --git a/localstack-core/localstack/services/lambda_/invocation/execution_environment.py b/localstack-core/localstack/services/lambda_/invocation/execution_environment.py
index bd65ba3904c69..139ec4d877fbe 100644
--- a/localstack-core/localstack/services/lambda_/invocation/execution_environment.py
+++ b/localstack-core/localstack/services/lambda_/invocation/execution_environment.py
@@ -37,10 +37,11 @@ class RuntimeStatus(Enum):
INACTIVE = auto()
STARTING = auto()
READY = auto()
- RUNNING = auto()
+ INVOKING = auto()
STARTUP_FAILED = auto()
STARTUP_TIMED_OUT = auto()
STOPPED = auto()
+ TIMING_OUT = auto()
class InvalidStatusException(Exception):
@@ -246,7 +247,7 @@ def stop(self) -> None:
def release(self) -> None:
self.last_returned = datetime.now()
with self.status_lock:
- if self.status != RuntimeStatus.RUNNING:
+ if self.status != RuntimeStatus.INVOKING:
raise InvalidStatusException(
f"Execution environment {self.id} can only be set to status ready while running."
f" Current status: {self.status}"
@@ -264,7 +265,7 @@ def reserve(self) -> None:
f"Execution environment {self.id} can only be reserved if ready. "
f" Current status: {self.status}"
)
- self.status = RuntimeStatus.RUNNING
+ self.status = RuntimeStatus.INVOKING
self.keepalive_timer.cancel()
@@ -274,6 +275,17 @@ def keepalive_passed(self) -> None:
self.id,
self.function_version.qualified_arn,
)
+ # stop() is allowed to interrupt invocations on purpose, which might cancel running
+ # invocations - something we must not do when the keepalive timer fires.
+ # The new TIMING_OUT state prevents this race condition.
+ with self.status_lock:
+ if self.status != RuntimeStatus.READY:
+ LOG.debug(
+ "Keepalive timer passed, but current runtime status is %s. Aborting keepalive stop.",
+ self.status,
+ )
+ return
+ self.status = RuntimeStatus.TIMING_OUT
self.stop()
# Notify assignment service via callback to remove from environments list
self.on_timeout(self.version_manager_id, self.id)
@@ -340,7 +352,7 @@ def get_prefixed_logs(self) -> str:
return f"{prefix}{prefixed_logs}"
def invoke(self, invocation: Invocation) -> InvocationResult:
- assert self.status == RuntimeStatus.RUNNING
+ assert self.status == RuntimeStatus.INVOKING
# Async/event invokes might miss an aws_trace_header, then we need to create a new root trace id.
aws_trace_header = (
invocation.trace_context.get("aws_trace_header") or TraceHeader().ensure_root_exists()
diff --git a/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py b/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py
index 757dab5d08324..eea6e0c77ebaa 100644
--- a/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py
+++ b/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py
@@ -1,8 +1,9 @@
import abc
import logging
+import time
from concurrent.futures import CancelledError, Future
from http import HTTPStatus
-from typing import Dict, Optional
+from typing import Any, Dict, Optional
import requests
from werkzeug import Request
@@ -10,6 +11,7 @@
from localstack.http import Response, route
from localstack.services.edge import ROUTER
from localstack.services.lambda_.invocation.lambda_models import InvocationResult
+from localstack.utils.backoff import ExponentialBackoff
from localstack.utils.lambda_debug_mode.lambda_debug_mode import (
DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS,
is_lambda_debug_mode,
@@ -192,7 +194,9 @@ def invoke(self, payload: Dict[str, str]) -> InvocationResult:
invocation_url = f"http://{self.container_address}:{self.container_port}/invoke"
# disable proxies for internal requests
proxies = {"http": "", "https": ""}
- response = requests.post(url=invocation_url, json=payload, proxies=proxies)
+ response = self._perform_invoke(
+ invocation_url=invocation_url, proxies=proxies, payload=payload
+ )
if not response.ok:
raise InvokeSendError(
f"Error while sending invocation {payload} to {invocation_url}. Error Code: {response.status_code}"
@@ -214,3 +218,65 @@ def invoke(self, payload: Dict[str, str]) -> InvocationResult:
invoke_timeout_buffer_seconds = 5
timeout_seconds = lambda_max_timeout_seconds + invoke_timeout_buffer_seconds
return self.invocation_future.result(timeout=timeout_seconds)
+
+ @staticmethod
+ def _perform_invoke(
+ invocation_url: str,
+ proxies: dict[str, str],
+ payload: dict[str, Any],
+ ) -> requests.Response:
+ """
+ Dispatches a Lambda invocation request to the specified container endpoint, with automatic
+ retries in case of connection errors, using exponential backoff.
+
+ The first attempt is made immediately. If it fails, exponential backoff is applied with
+ retry intervals starting at 100ms, doubling each time for up to 5 total retries.
+
+ Parameters:
+ invocation_url (str): The full URL of the container's invocation endpoint.
+ proxies (dict[str, str]): Proxy settings to be used for the HTTP request.
+ payload (dict[str, Any]): The JSON payload to send to the container.
+
+ Returns:
+ Response: The successful HTTP response from the container.
+
+ Raises:
+ requests.exceptions.ConnectionError: If all retry attempts fail to connect.
+ """
+ backoff = None
+ last_exception = None
+ max_retry_on_connection_error = 5
+
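+ # Only connection errors are retried: they typically mean the container's invoke endpoint
+ # is not accepting connections yet. Any HTTP response, even an error status, is returned
+ # to the caller, which checks response.ok itself.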
+ for attempt_count in range(max_retry_on_connection_error + 1): # 1 initial + n retries
+ try:
+ response = requests.post(url=invocation_url, json=payload, proxies=proxies)
+ return response
+ except requests.exceptions.ConnectionError as connection_error:
+ last_exception = connection_error
+
+ if backoff is None:
+ LOG.debug(
+ "Initial connection attempt failed: %s. Starting backoff retries.",
+ connection_error,
+ )
+ backoff = ExponentialBackoff(
+ max_retries=max_retry_on_connection_error,
+ initial_interval=0.1,
+ multiplier=2.0,
+ randomization_factor=0.0,
+ max_interval=1,
+ max_time_elapsed=-1,
+ )
+
+ delay = backoff.next_backoff()
+ if delay > 0:
+ LOG.debug(
+ "Connection error on invoke attempt #%d: %s. Retrying in %.2f seconds",
+ attempt_count,
+ connection_error,
+ delay,
+ )
+ time.sleep(delay)
+
+ LOG.debug("Connection error after all attempts exhausted: %s", last_exception)
+ raise last_exception
diff --git a/localstack-core/localstack/services/lambda_/invocation/logs.py b/localstack-core/localstack/services/lambda_/invocation/logs.py
index a63f1ab2d04f4..2ff2ab35d951b 100644
--- a/localstack-core/localstack/services/lambda_/invocation/logs.py
+++ b/localstack-core/localstack/services/lambda_/invocation/logs.py
@@ -1,13 +1,13 @@
import dataclasses
import logging
import threading
+import time
from queue import Queue
from typing import Optional, Union
from localstack.aws.connect import connect_to
from localstack.utils.aws.client_types import ServicePrincipal
from localstack.utils.bootstrap import is_api_enabled
-from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
from localstack.utils.threads import FuncThread
LOG = logging.getLogger(__name__)
@@ -50,10 +50,34 @@ def run_log_loop(self, *args, **kwargs) -> None:
log_item = self.log_queue.get()
if log_item is QUEUE_SHUTDOWN:
return
+ # we need to split on newlines - but keep the newlines in the strings;
+ # empty lines are stripped, as CloudWatch does not accept them
+ logs = [line + "\n" for line in log_item.logs.split("\n") if line]
+ # until we have proper per-line timestamps, all log events of a single invocation share the same timestamp
+ log_events = [
+ {"timestamp": int(time.time() * 1000), "message": log_line} for log_line in logs
+ ]
try:
- store_cloudwatch_logs(
- logs_client, log_item.log_group, log_item.log_stream, log_item.logs
- )
+ try:
+ logs_client.put_log_events(
+ logGroupName=log_item.log_group,
+ logStreamName=log_item.log_stream,
+ logEvents=log_events,
+ )
+ except logs_client.exceptions.ResourceNotFoundException:
+ # create new log group
+ try:
+ logs_client.create_log_group(logGroupName=log_item.log_group)
+ except logs_client.exceptions.ResourceAlreadyExistsException:
+ pass
+ logs_client.create_log_stream(
+ logGroupName=log_item.log_group, logStreamName=log_item.log_stream
+ )
+ logs_client.put_log_events(
+ logGroupName=log_item.log_group,
+ logStreamName=log_item.log_stream,
+ logEvents=log_events,
+ )
except Exception as e:
LOG.warning(
"Error saving logs to group %s in region %s: %s",
diff --git a/localstack-core/localstack/services/lambda_/invocation/version_manager.py b/localstack-core/localstack/services/lambda_/invocation/version_manager.py
index f39d706c3f118..e53049dc82754 100644
--- a/localstack-core/localstack/services/lambda_/invocation/version_manager.py
+++ b/localstack-core/localstack/services/lambda_/invocation/version_manager.py
@@ -109,7 +109,7 @@ def start(self) -> VersionState:
self.function_arn,
self.function_version.config.internal_revision,
e,
- exc_info=True,
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
)
return self.state
@@ -238,13 +238,20 @@ def invoke(self, *, invocation: Invocation) -> InvocationResult:
)
# TODO: consider using the same prefix logging as in error case for execution environment.
# possibly as separate named logger.
- LOG.debug("Got logs for invocation '%s'", invocation.request_id)
- for log_line in invocation_result.logs.splitlines():
- LOG.debug(
- "[%s-%s] %s",
- function_id.function_name,
+ if invocation_result.logs is not None:
+ LOG.debug("Got logs for invocation '%s'", invocation.request_id)
+ for log_line in invocation_result.logs.splitlines():
+ LOG.debug(
+ "[%s-%s] %s",
+ function_id.function_name,
+ invocation.request_id,
+ truncate(log_line, config.LAMBDA_TRUNCATE_STDOUT),
+ )
+ else:
+ LOG.warning(
+ "[%s] Error while printing logs for function '%s': Received no logs from environment.",
invocation.request_id,
- truncate(log_line, config.LAMBDA_TRUNCATE_STDOUT),
+ function_id.function_name,
)
return invocation_result
@@ -260,7 +267,8 @@ def store_logs(
self.log_handler.add_logs(log_item)
else:
LOG.warning(
- "Received no logs from invocation with id %s for lambda %s",
+ "Received no logs from invocation with id %s for lambda %s. Execution environment logs: \n%s",
invocation_result.request_id,
self.function_arn,
+ execution_env.get_prefixed_logs(),
)
diff --git a/localstack-core/localstack/services/lambda_/packages.py b/localstack-core/localstack/services/lambda_/packages.py
index fd893bc1591df..fd549c1c7ad34 100644
--- a/localstack-core/localstack/services/lambda_/packages.py
+++ b/localstack-core/localstack/services/lambda_/packages.py
@@ -13,7 +13,7 @@
"""Customized LocalStack version of the AWS Lambda Runtime Interface Emulator (RIE).
https://github.com/localstack/lambda-runtime-init/blob/localstack/README-LOCALSTACK.md
"""
-LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.32-pre"
+LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.33-pre"
LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION
LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}"
diff --git a/localstack-core/localstack/services/lambda_/provider.py b/localstack-core/localstack/services/lambda_/provider.py
index 9356dde0f281c..add4c2f8cdd0b 100644
--- a/localstack-core/localstack/services/lambda_/provider.py
+++ b/localstack-core/localstack/services/lambda_/provider.py
@@ -223,6 +223,7 @@
DEPRECATED_RUNTIMES,
DEPRECATED_RUNTIMES_UPGRADES,
RUNTIMES_AGGREGATED,
+ SNAP_START_SUPPORTED_RUNTIMES,
VALID_RUNTIMES,
)
from localstack.services.lambda_.urlrouter import FunctionUrlRouter
@@ -718,6 +719,11 @@ def _validate_snapstart(snap_start: SnapStart, runtime: Runtime):
f"1 validation error detected: Value '{apply_on}' at 'snapStart.applyOn' failed to satisfy constraint: Member must satisfy enum value set: [PublishedVersions, None]"
)
+ if runtime not in SNAP_START_SUPPORTED_RUNTIMES:
+ raise InvalidParameterValueException(
+ f"{runtime} is not supported for SnapStart enabled functions.", Type="User"
+ )
+
def _validate_layers(self, new_layers: list[str], region: str, account_id: str):
if len(new_layers) > LAMBDA_LAYERS_LIMIT_PER_FUNCTION:
raise InvalidParameterValueException(
@@ -1597,10 +1603,19 @@ def invoke(
except ServiceException:
raise
except EnvironmentStartupTimeoutException as e:
- raise LambdaServiceException("Internal error while executing lambda") from e
+ raise LambdaServiceException(
+ f"[{context.request_id}] Timeout while starting up lambda environment for function {function_name}:{qualifier}"
+ ) from e
except Exception as e:
- LOG.error("Error while invoking lambda", exc_info=e)
- raise LambdaServiceException("Internal error while executing lambda") from e
+ LOG.error(
+ "[%s] Error while invoking lambda %s",
+ context.request_id,
+ function_name,
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
+ )
+ raise LambdaServiceException(
+ f"[{context.request_id}] Internal error while executing lambda {function_name}:{qualifier}. Caused by {type(e).__name__}: {e}"
+ ) from e
if invocation_type == InvocationType.Event:
# This happens when invocation type is event
@@ -1973,6 +1988,8 @@ def create_event_source_mapping_v2(
def validate_event_source_mapping(self, context, request):
# TODO: test whether stream ARNs are valid sources for Pipes or ESM or whether only DynamoDB table ARNs work
+ # TODO: Validate the MaxRecordAgeInSeconds (i.e. it cannot be lower than 60s, but can be -1) and MaxRetryAttempts parameters.
+ # See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html#cfn-lambda-eventsourcemapping-maximumrecordageinseconds
is_create_esm_request = context.operation.name == self.create_event_source_mapping.operation
if destination_config := request.get("DestinationConfig"):
diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py
index 60b9c36b4c2ac..bbcc61e335934 100644
--- a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py
+++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py
@@ -99,6 +99,13 @@ class Code(TypedDict):
ZipFile: Optional[str]
+class LoggingConfig(TypedDict):
+ ApplicationLogLevel: Optional[str]
+ LogFormat: Optional[str]
+ LogGroup: Optional[str]
+ SystemLogLevel: Optional[str]
+
+
class Environment(TypedDict):
Variables: Optional[dict]
@@ -333,11 +340,22 @@ def create(
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - elasticfilesystem:DescribeMountTargets
+ - kms:CreateGrant
- kms:Decrypt
+ - kms:Encrypt
+ - kms:GenerateDataKey
- lambda:GetCodeSigningConfig
- lambda:GetFunctionCodeSigningConfig
+ - lambda:GetLayerVersion
- lambda:GetRuntimeManagementConfig
- lambda:PutRuntimeManagementConfig
+ - lambda:TagResource
+ - lambda:GetPolicy
+ - lambda:AddPermission
+ - lambda:RemovePermission
+ - lambda:GetResourcePolicy
+ - lambda:PutResourcePolicy
"""
model = request.desired_state
@@ -368,6 +386,7 @@ def create(
"Timeout",
"TracingConfig",
"VpcConfig",
+ "LoggingConfig",
],
)
if "Timeout" in kwargs:
@@ -481,13 +500,22 @@ def update(
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - elasticfilesystem:DescribeMountTargets
+ - kms:CreateGrant
- kms:Decrypt
+ - kms:GenerateDataKey
+ - lambda:GetRuntimeManagementConfig
+ - lambda:PutRuntimeManagementConfig
- lambda:PutFunctionCodeSigningConfig
- lambda:DeleteFunctionCodeSigningConfig
- lambda:GetCodeSigningConfig
- lambda:GetFunctionCodeSigningConfig
- - lambda:GetRuntimeManagementConfig
- - lambda:PutRuntimeManagementConfig
+ - lambda:GetPolicy
+ - lambda:AddPermission
+ - lambda:RemovePermission
+ - lambda:GetResourcePolicy
+ - lambda:PutResourcePolicy
+ - lambda:DeleteResourcePolicy
"""
client = request.aws_client_factory.lambda_
@@ -512,6 +540,7 @@ def update(
"Timeout",
"TracingConfig",
"VpcConfig",
+ "LoggingConfig",
]
update_config_props = util.select_attributes(request.desired_state, config_keys)
function_name = request.previous_state["FunctionName"]
diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json
index a03d74999becd..b1d128047b150 100644
--- a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json
+++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json
@@ -1,4 +1,11 @@
{
+ "tagging": {
+ "taggable": true,
+ "tagOnCreate": true,
+ "tagUpdatable": true,
+ "tagProperty": "/properties/Tags",
+ "cloudFormationSystemTags": true
+ },
"handlers": {
"read": {
"permissions": [
@@ -17,11 +24,22 @@
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
+ "elasticfilesystem:DescribeMountTargets",
+ "kms:CreateGrant",
"kms:Decrypt",
+ "kms:Encrypt",
+ "kms:GenerateDataKey",
"lambda:GetCodeSigningConfig",
"lambda:GetFunctionCodeSigningConfig",
+ "lambda:GetLayerVersion",
"lambda:GetRuntimeManagementConfig",
- "lambda:PutRuntimeManagementConfig"
+ "lambda:PutRuntimeManagementConfig",
+ "lambda:TagResource",
+ "lambda:GetPolicy",
+ "lambda:AddPermission",
+ "lambda:RemovePermission",
+ "lambda:GetResourcePolicy",
+ "lambda:PutResourcePolicy"
]
},
"update": {
@@ -40,13 +58,22 @@
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
+ "elasticfilesystem:DescribeMountTargets",
+ "kms:CreateGrant",
"kms:Decrypt",
+ "kms:GenerateDataKey",
+ "lambda:GetRuntimeManagementConfig",
+ "lambda:PutRuntimeManagementConfig",
"lambda:PutFunctionCodeSigningConfig",
"lambda:DeleteFunctionCodeSigningConfig",
"lambda:GetCodeSigningConfig",
"lambda:GetFunctionCodeSigningConfig",
- "lambda:GetRuntimeManagementConfig",
- "lambda:PutRuntimeManagementConfig"
+ "lambda:GetPolicy",
+ "lambda:AddPermission",
+ "lambda:RemovePermission",
+ "lambda:GetResourcePolicy",
+ "lambda:PutResourcePolicy",
+ "lambda:DeleteResourcePolicy"
]
},
"list": {
@@ -63,13 +90,15 @@
},
"typeName": "AWS::Lambda::Function",
"readOnlyProperties": [
- "/properties/Arn",
"/properties/SnapStartResponse",
"/properties/SnapStartResponse/ApplyOn",
- "/properties/SnapStartResponse/OptimizationStatus"
+ "/properties/SnapStartResponse/OptimizationStatus",
+ "/properties/Arn"
],
- "description": "Resource Type definition for AWS::Lambda::Function",
+ "description": "Resource Type definition for AWS::Lambda::Function in region",
"writeOnlyProperties": [
+ "/properties/SnapStart",
+ "/properties/SnapStart/ApplyOn",
"/properties/Code",
"/properties/Code/ImageUri",
"/properties/Code/S3Bucket",
@@ -133,6 +162,10 @@
"additionalProperties": false,
"type": "object",
"properties": {
+ "Ipv6AllowedForDualStack": {
+ "description": "A boolean indicating whether IPv6 protocols will be allowed for dual stack subnets",
+ "type": "boolean"
+ },
"SecurityGroupIds": {
"maxItems": 5,
"uniqueItems": false,
@@ -261,6 +294,49 @@
}
}
},
+ "LoggingConfig": {
+ "description": "The function's logging configuration.",
+ "additionalProperties": false,
+ "type": "object",
+ "properties": {
+ "LogFormat": {
+ "description": "Log delivery format for the lambda function",
+ "type": "string",
+ "enum": [
+ "Text",
+ "JSON"
+ ]
+ },
+ "ApplicationLogLevel": {
+ "description": "Application log granularity level, can only be used when LogFormat is set to JSON",
+ "type": "string",
+ "enum": [
+ "TRACE",
+ "DEBUG",
+ "INFO",
+ "WARN",
+ "ERROR",
+ "FATAL"
+ ]
+ },
+ "LogGroup": {
+ "minLength": 1,
+ "pattern": "[\\.\\-_/#A-Za-z0-9]+",
+ "description": "The log group name.",
+ "type": "string",
+ "maxLength": 512
+ },
+ "SystemLogLevel": {
+ "description": "System log granularity level, can only be used when LogFormat is set to JSON",
+ "type": "string",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARN"
+ ]
+ }
+ }
+ },
"Environment": {
"description": "A function's environment variable settings.",
"additionalProperties": false,
@@ -457,6 +533,10 @@
"description": "The Amazon Resource Name (ARN) of the function's execution role.",
"type": "string"
},
+ "LoggingConfig": {
+ "description": "The logging configuration of your function",
+ "$ref": "#/definitions/LoggingConfig"
+ },
"Environment": {
"description": "Environment variables that are accessible from function code during execution.",
"$ref": "#/definitions/Environment"
diff --git a/localstack-core/localstack/services/lambda_/runtimes.py b/localstack-core/localstack/services/lambda_/runtimes.py
index 4eaf2a876f04e..3fa96216257f6 100644
--- a/localstack-core/localstack/services/lambda_/runtimes.py
+++ b/localstack-core/localstack/services/lambda_/runtimes.py
@@ -59,6 +59,7 @@
Runtime.dotnet6: "dotnet:6",
Runtime.dotnetcore3_1: "dotnet:core3.1", # deprecated Apr 3, 2023 => Apr 3, 2023 => May 3, 2023
Runtime.go1_x: "go:1", # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024
+ Runtime.ruby3_4: "ruby:3.4",
Runtime.ruby3_3: "ruby:3.3",
Runtime.ruby3_2: "ruby:3.2",
Runtime.ruby2_7: "ruby:2.7", # deprecated Dec 7, 2023 => Jan 9, 2024 => Feb 8, 2024
@@ -133,6 +134,7 @@
"ruby": [
Runtime.ruby3_2,
Runtime.ruby3_3,
+ Runtime.ruby3_4,
],
"dotnet": [
Runtime.dotnet6,
@@ -149,7 +151,18 @@
runtime for runtime_group in RUNTIMES_AGGREGATED.values() for runtime in runtime_group
]
+# An unordered list of snapstart-enabled runtimes. Related to snapshots in test_snapstart_exceptions
+# https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html
+SNAP_START_SUPPORTED_RUNTIMES = [
+ Runtime.java11,
+ Runtime.java17,
+ Runtime.java21,
+ Runtime.python3_12,
+ Runtime.python3_13,
+ Runtime.dotnet8,
+]
+
# An ordered list of all Lambda runtimes considered valid by AWS. Matching snapshots in test_create_lambda_exceptions
-VALID_RUNTIMES: str = "[nodejs20.x, provided.al2023, python3.12, python3.13, nodejs22.x, java17, nodejs16.x, dotnet8, python3.10, java11, python3.11, dotnet6, java21, nodejs18.x, provided.al2, ruby3.3, java8.al2, ruby3.2, python3.8, python3.9]"
+VALID_RUNTIMES: str = "[nodejs20.x, provided.al2023, python3.12, python3.13, nodejs22.x, java17, nodejs16.x, dotnet8, python3.10, java11, python3.11, dotnet6, java21, nodejs18.x, provided.al2, ruby3.3, ruby3.4, java8.al2, ruby3.2, python3.8, python3.9]"
# An ordered list of all Lambda runtimes for layers considered valid by AWS. Matching snapshots in test_layer_exceptions
-VALID_LAYER_RUNTIMES: str = "[ruby2.6, dotnetcore1.0, python3.7, nodejs8.10, nasa, ruby2.7, python2.7-greengrass, dotnetcore2.0, python3.8, java21, dotnet6, dotnetcore2.1, python3.9, java11, nodejs6.10, provided, dotnetcore3.1, dotnet8, java17, nodejs, nodejs4.3, java8.al2, go1.x, nodejs20.x, go1.9, byol, nodejs10.x, provided.al2023, nodejs22.x, python3.10, java8, nodejs12.x, python3.11, nodejs8.x, python3.12, nodejs14.x, nodejs8.9, python3.13, nodejs16.x, provided.al2, nodejs4.3-edge, nodejs18.x, ruby3.2, python3.4, ruby3.3, ruby2.5, python3.6, python2.7]"
+VALID_LAYER_RUNTIMES: str = "[ruby2.6, dotnetcore1.0, python3.7, nodejs8.10, nasa, ruby2.7, python2.7-greengrass, dotnetcore2.0, python3.8, java21, dotnet6, dotnetcore2.1, python3.9, java11, nodejs6.10, provided, dotnetcore3.1, dotnet8, java25, java17, nodejs, nodejs4.3, java8.al2, go1.x, dotnet10, nodejs20.x, go1.9, byol, nodejs10.x, provided.al2023, nodejs22.x, python3.10, java8, nodejs12.x, python3.11, nodejs24.x, nodejs8.x, python3.12, nodejs14.x, nodejs8.9, python3.13, python3.14, nodejs16.x, provided.al2, nodejs4.3-edge, nodejs18.x, ruby3.2, python3.4, ruby3.3, ruby3.4, ruby2.5, python3.6, python2.7]"
diff --git a/localstack-core/localstack/services/s3/presigned_url.py b/localstack-core/localstack/services/s3/presigned_url.py
index 573ac0a257a0a..ecdd527e65861 100644
--- a/localstack-core/localstack/services/s3/presigned_url.py
+++ b/localstack-core/localstack/services/s3/presigned_url.py
@@ -60,7 +60,7 @@
SIGNATURE_V2_POST_FIELDS = [
"signature",
- "AWSAccessKeyId",
+ "awsaccesskeyid",
]
SIGNATURE_V4_POST_FIELDS = [
@@ -768,13 +768,17 @@ def validate_post_policy(
)
raise ex
- if not (policy := request_form.get("policy")):
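+ # POST form field names are matched case-insensitively, so normalize the keys to lowercase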
+ form_dict = {k.lower(): v for k, v in request_form.items()}
+
+ policy = form_dict.get("policy")
+ if not policy:
# A POST request needs a policy except if the bucket is publicly writable
return
# TODO: this does validation of fields only for now
- is_v4 = _is_match_with_signature_fields(request_form, SIGNATURE_V4_POST_FIELDS)
- is_v2 = _is_match_with_signature_fields(request_form, SIGNATURE_V2_POST_FIELDS)
+ is_v4 = _is_match_with_signature_fields(form_dict, SIGNATURE_V4_POST_FIELDS)
+ is_v2 = _is_match_with_signature_fields(form_dict, SIGNATURE_V2_POST_FIELDS)
+
if not is_v2 and not is_v4:
ex: AccessDenied = AccessDenied("Access Denied")
ex.HostId = FAKE_HOST_ID
@@ -784,7 +788,7 @@ def validate_post_policy(
policy_decoded = json.loads(base64.b64decode(policy).decode("utf-8"))
except ValueError:
# this means the policy has been tampered with
- signature = request_form.get("signature") if is_v2 else request_form.get("x-amz-signature")
+ signature = form_dict.get("signature") if is_v2 else form_dict.get("x-amz-signature")
credentials = get_credentials_from_parameters(request_form, "us-east-1")
ex: SignatureDoesNotMatch = create_signature_does_not_match_sig_v2(
request_signature=signature,
@@ -813,7 +817,6 @@ def validate_post_policy(
return
conditions = policy_decoded.get("conditions", [])
- form_dict = {k.lower(): v for k, v in request_form.items()}
for condition in conditions:
if not _verify_condition(condition, form_dict, additional_policy_metadata):
str_condition = str(condition).replace("'", '"')
@@ -896,7 +899,7 @@ def _parse_policy_expiration_date(expiration_string: str) -> datetime.datetime:
def _is_match_with_signature_fields(
- request_form: ImmutableMultiDict, signature_fields: list[str]
+ request_form: dict[str, str], signature_fields: list[str]
) -> bool:
"""
Checks if the form contains at least one of the required fields passed in `signature_fields`
@@ -910,12 +913,13 @@ def _is_match_with_signature_fields(
for p in signature_fields:
if p not in request_form:
LOG.info("POST pre-sign missing fields")
- # .capitalize() does not work here, because of AWSAccessKeyId casing
argument_name = (
- capitalize_header_name_from_snake_case(p)
- if "-" in p
- else f"{p[0].upper()}{p[1:]}"
+ capitalize_header_name_from_snake_case(p) if "-" in p else p.capitalize()
)
+ # AWSAccessKeyId is a special case
+ if argument_name == "Awsaccesskeyid":
+ argument_name = "AWSAccessKeyId"
+
ex: InvalidArgument = _create_invalid_argument_exc(
message=f"Bucket POST must contain a field named '{argument_name}'. If it is specified, please check the order of the fields.",
name=argument_name,
diff --git a/localstack-core/localstack/services/secretsmanager/provider.py b/localstack-core/localstack/services/secretsmanager/provider.py
index efefe6220819d..5838732f2c4b0 100644
--- a/localstack-core/localstack/services/secretsmanager/provider.py
+++ b/localstack-core/localstack/services/secretsmanager/provider.py
@@ -729,17 +729,28 @@ def backend_rotate_secret(
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
- if self.secrets[secret_id].is_deleted():
+ secret = self.secrets[secret_id]
+ if secret.is_deleted():
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
+ # Resolve rotation_lambda_arn and fall back to the previous value if it's missing
+ # from the current request
+ rotation_lambda_arn = rotation_lambda_arn or secret.rotation_lambda_arn
+ if not rotation_lambda_arn:
+ raise InvalidRequestException(
+ "No Lambda rotation function ARN is associated with this secret."
+ )
if rotation_lambda_arn:
if len(rotation_lambda_arn) > 2048:
msg = "RotationLambdaARN must <= 2048 characters long."
raise InvalidParameterException(msg)
+ # In case rotation_period is not provided, resolve auto_rotate_after_days
+ # and fall back to the previous value if it's missing from the current request.
+ rotation_period = secret.auto_rotate_after_days or 0
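+ # precedence: rotation rules from this request (handled below) > previously stored value > 0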
if rotation_rules:
if rotation_days in rotation_rules:
rotation_period = rotation_rules[rotation_days]
@@ -753,8 +764,6 @@ def backend_rotate_secret(
except Exception:
raise ResourceNotFoundException("Lambda does not exist or could not be accessed")
- secret = self.secrets[secret_id]
-
# The rotation function must end with the versions of the secret in
# one of two states:
#
@@ -782,7 +791,7 @@ def backend_rotate_secret(
pass
secret.rotation_lambda_arn = rotation_lambda_arn
- secret.auto_rotate_after_days = rotation_rules.get(rotation_days, 0)
+ secret.auto_rotate_after_days = rotation_period
if secret.auto_rotate_after_days > 0:
wait_interval_s = int(rotation_period) * 86400
secret.next_rotation_date = int(time.time()) + wait_interval_s
diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py
index 165545353b0d1..00b68044ae750 100644
--- a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py
+++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py
@@ -148,6 +148,7 @@ def delete(
IAM permissions required:
- sns:DeleteTopic
"""
+ # FIXME: This appears to incorrectly assume TopicArn would be provided.
model = request.desired_state
sns = request.aws_client_factory.sns
sns.delete_topic(TopicArn=model["TopicArn"])
diff --git a/localstack-core/localstack/services/sqs/models.py b/localstack-core/localstack/services/sqs/models.py
index 779a95437ad91..8e7352bd28172 100644
--- a/localstack-core/localstack/services/sqs/models.py
+++ b/localstack-core/localstack/services/sqs/models.py
@@ -30,9 +30,9 @@
)
from localstack.services.sqs.queue import InterruptiblePriorityQueue, InterruptibleQueue
from localstack.services.sqs.utils import (
- decode_receipt_handle,
encode_move_task_handle,
encode_receipt_handle,
+ extract_receipt_handle_info,
global_message_sequence,
guess_endpoint_strategy_and_host,
is_message_deduplication_id_required,
@@ -445,7 +445,7 @@ def approx_number_of_messages_delayed(self) -> int:
return len(self.delayed)
def validate_receipt_handle(self, receipt_handle: str):
- if self.arn != decode_receipt_handle(receipt_handle):
+ if self.arn != extract_receipt_handle_info(receipt_handle).queue_arn:
raise ReceiptHandleIsInvalid(
f'The input receipt handle "{receipt_handle}" is not a valid receipt handle.'
)
@@ -490,6 +490,7 @@ def remove(self, receipt_handle: str):
return
standard_message = self.receipts[receipt_handle]
+ self._pre_delete_checks(standard_message, receipt_handle)
standard_message.deleted = True
LOG.debug(
"deleting message %s from queue %s",
@@ -724,6 +725,18 @@ def remove_expired_messages_from_heap(
return expired
+ def _pre_delete_checks(self, standard_message: SqsMessage, receipt_handle: str) -> None:
+ """
+ Runs any checks to determine whether a message that has been successfully identified
+ via a receipt handle should actually be deleted.
+ For example, a receipt handle that has expired might not lead to deletion.
+
+ :param standard_message: The message to be deleted
+ :param receipt_handle: The handle associated with the message
+ :return: None. Potential violations raise errors.
+ """
+ pass
+
class StandardQueue(SqsQueue):
visible: InterruptiblePriorityQueue[SqsMessage]
@@ -1001,9 +1014,15 @@ def update_delay_seconds(self, value: int):
for message in self.delayed:
message.delay_seconds = value
+ def _pre_delete_checks(self, message: SqsMessage, receipt_handle: str) -> None:
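+ # FIFO queues reject deletions whose receipt handle has outlived the message's visibility timeout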
+ last_received = extract_receipt_handle_info(receipt_handle).last_received
+ if time.time() - float(last_received) > message.visibility_timeout:
+ raise InvalidParameterValueException(
+ f"Value {receipt_handle} for parameter ReceiptHandle is invalid. Reason: The receipt handle has expired."
+ )
+
def remove(self, receipt_handle: str):
self.validate_receipt_handle(receipt_handle)
- decode_receipt_handle(receipt_handle)
super().remove(receipt_handle)
diff --git a/localstack-core/localstack/services/sqs/provider.py b/localstack-core/localstack/services/sqs/provider.py
index efb857dbbf573..10988383bd745 100644
--- a/localstack-core/localstack/services/sqs/provider.py
+++ b/localstack-core/localstack/services/sqs/provider.py
@@ -102,7 +102,6 @@
is_fifo_queue,
is_message_deduplication_id_required,
parse_queue_url,
- token_generator,
)
from localstack.services.stores import AccountRegionBundle
from localstack.utils.aws.arns import parse_arn
@@ -116,7 +115,7 @@
from localstack.utils.collections import PaginatedList
from localstack.utils.run import FuncThread
from localstack.utils.scheduler import Scheduler
-from localstack.utils.strings import md5
+from localstack.utils.strings import md5, token_generator
from localstack.utils.threads import start_thread
from localstack.utils.time import now
diff --git a/localstack-core/localstack/services/sqs/utils.py b/localstack-core/localstack/services/sqs/utils.py
index 70d5876454759..a280128ad7b66 100644
--- a/localstack-core/localstack/services/sqs/utils.py
+++ b/localstack-core/localstack/services/sqs/utils.py
@@ -3,7 +3,7 @@
import json
import re
import time
-from typing import Literal, Optional, Tuple
+from typing import Literal, NamedTuple, Optional, Tuple
from urllib.parse import urlparse
from localstack.aws.api.sqs import QueueAttributeName, ReceiptHandleIsInvalid
@@ -116,16 +116,25 @@ def parse_queue_url(queue_url: str) -> Tuple[str, Optional[str], str]:
return account_id, region, queue_name
-def decode_receipt_handle(receipt_handle: str) -> str:
+class ReceiptHandleInformation(NamedTuple):
+ identifier: str
+ queue_arn: str
+ message_id: str
+ last_received: str
+
+
+def extract_receipt_handle_info(receipt_handle: str) -> ReceiptHandleInformation:
try:
handle = base64.b64decode(receipt_handle).decode("utf-8")
- _, queue_arn, message_id, last_received = handle.split(" ")
- parse_arn(queue_arn) # raises a ValueError if it is not an arn
- return queue_arn
- except (IndexError, ValueError):
+ parts = handle.split(" ")
+ if len(parts) != 4:
+ raise ValueError(f'The input receipt handle "{receipt_handle}" is incomplete.')
+ parse_arn(parts[1])
+ return ReceiptHandleInformation(*parts)
+ except (IndexError, ValueError) as e:
raise ReceiptHandleIsInvalid(
f'The input receipt handle "{receipt_handle}" is not a valid receipt handle.'
- )
+ ) from e
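+
+
+# Illustrative round-trip (hypothetical values): a receipt handle is the base64 encoding of
+# "<identifier> <queue_arn> <message_id> <last_received>", e.g.
+#   handle = base64.b64encode(b"id-1 arn:aws:sqs:us-east-1:111111111111:my-queue msg-1 1700000000.0").decode()
+#   extract_receipt_handle_info(handle).queue_arn -> "arn:aws:sqs:us-east-1:111111111111:my-queue"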
def encode_receipt_handle(queue_arn, message) -> str:
@@ -175,9 +184,3 @@ def global_message_sequence():
def generate_message_id():
return long_uid()
-
-
-def token_generator(item: str) -> str:
- base64_bytes = base64.b64encode(item.encode("utf-8"))
- next_token = base64_bytes.decode("utf-8")
- return next_token
diff --git a/localstack-core/localstack/services/ssm/provider.py b/localstack-core/localstack/services/ssm/provider.py
index 189455ea258ef..7787daa091383 100644
--- a/localstack-core/localstack/services/ssm/provider.py
+++ b/localstack-core/localstack/services/ssm/provider.py
@@ -60,6 +60,7 @@
PatchAction,
PatchBaselineMaxResults,
PatchComplianceLevel,
+ PatchComplianceStatus,
PatchFilterGroup,
PatchIdList,
PatchOrchestratorFilterList,
@@ -201,6 +202,7 @@ def create_patch_baseline(
rejected_patches_action: PatchAction = None,
description: BaselineDescription = None,
sources: PatchSourceList = None,
+ available_security_updates_compliance_status: PatchComplianceStatus = None,
client_token: ClientToken = None,
tags: TagList = None,
**kwargs,
diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py
index 16c9109270926..95ea2ecb4d214 100644
--- a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py
+++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py
@@ -173,15 +173,12 @@ def update(
# tag handling
new_tags = update_config_props.pop("Tags", {})
- self.update_tags(ssm, model, new_tags)
+ if new_tags:
+ self.update_tags(ssm, model, new_tags)
ssm.put_parameter(Overwrite=True, Tags=[], **update_config_props)
- return ProgressEvent(
- status=OperationStatus.SUCCESS,
- resource_model=model,
- custom_context=request.custom_context,
- )
+ return self.read(request)
def update_tags(self, ssm, model, new_tags):
current_tags = ssm.list_tags_for_resource(
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py
index 94cc1fc35817d..9f59414b844ab 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py
@@ -6,9 +6,13 @@
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import (
StateCredentials,
)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.mock_eval_utils import (
+ eval_mocked_response,
+)
from localstack.services.stepfunctions.asl.eval.environment import Environment
from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for
from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+from localstack.services.stepfunctions.mocking.mock_config import MockedResponse
from localstack.utils.collections import select_from_typed_dict
from localstack.utils.strings import to_bytes
@@ -38,26 +42,46 @@ def _from_payload(payload_streaming_body: IO[bytes]) -> Union[json, str]:
return decoded_data
-def exec_lambda_function(
- env: Environment, parameters: dict, region: str, state_credentials: StateCredentials
-) -> None:
+def _mocked_invoke_lambda_function(env: Environment) -> InvocationResponse:
+ mocked_response: MockedResponse = env.get_current_mocked_response()
+ eval_mocked_response(env=env, mocked_response=mocked_response)
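+ # for Return responses the mocked payload is now on top of the stack; Throw responses raise before reaching this point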
+ invocation_resp: InvocationResponse = env.stack.pop()
+ return invocation_resp
+
+
+def _invoke_lambda_function(
+ parameters: dict, region: str, state_credentials: StateCredentials
+) -> InvocationResponse:
lambda_client = boto_client_for(
service="lambda", region=region, state_credentials=state_credentials
)
- invocation_resp: InvocationResponse = lambda_client.invoke(**parameters)
-
- func_error: Optional[str] = invocation_resp.get("FunctionError")
+ invocation_response: InvocationResponse = lambda_client.invoke(**parameters)
- payload = invocation_resp["Payload"]
+ payload = invocation_response["Payload"]
payload_json = _from_payload(payload)
- if func_error:
- payload_str = json.dumps(payload_json, separators=(",", ":"))
- raise LambdaFunctionErrorException(func_error, payload_str)
+ invocation_response["Payload"] = payload_json
- invocation_resp["Payload"] = payload_json
+ return invocation_response
+
+
+def execute_lambda_function_integration(
+ env: Environment, parameters: dict, region: str, state_credentials: StateCredentials
+) -> None:
+ if env.is_mocked_mode():
+ invocation_response: InvocationResponse = _mocked_invoke_lambda_function(env=env)
+ else:
+ invocation_response: InvocationResponse = _invoke_lambda_function(
+ parameters=parameters, region=region, state_credentials=state_credentials
+ )
+
+ function_error: Optional[str] = invocation_response.get("FunctionError")
+ if function_error:
+ payload_json = invocation_response["Payload"]
+ payload_str = json.dumps(payload_json, separators=(",", ":"))
+ raise LambdaFunctionErrorException(function_error, payload_str)
- response = select_from_typed_dict(typed_dict=InvocationResponse, obj=invocation_resp)
+ response = select_from_typed_dict(typed_dict=InvocationResponse, obj=invocation_response) # noqa
env.stack.append(response)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/mock_eval_utils.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/mock_eval_utils.py
new file mode 100644
index 0000000000000..aa8a9c423f433
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/mock_eval_utils.py
@@ -0,0 +1,45 @@
+import copy
+
+from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails
+from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import (
+ CustomErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+ FailureEvent,
+ FailureEventException,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.mocking.mock_config import (
+ MockedResponse,
+ MockedResponseReturn,
+ MockedResponseThrow,
+)
+
+
+def _eval_mocked_response_throw(env: Environment, mocked_response: MockedResponseThrow) -> None:
+ task_failed_event_details = TaskFailedEventDetails(
+ error=mocked_response.error, cause=mocked_response.cause
+ )
+ error_name = CustomErrorName(mocked_response.error)
+ failure_event = FailureEvent(
+ env=env,
+ error_name=error_name,
+ event_type=HistoryEventType.TaskFailed,
+ event_details=EventDetails(taskFailedEventDetails=task_failed_event_details),
+ )
+ raise FailureEventException(failure_event=failure_event)
+
+
+def _eval_mocked_response_return(env: Environment, mocked_response: MockedResponseReturn) -> None:
+ payload_copy = copy.deepcopy(mocked_response.payload)
+ env.stack.append(payload_copy)
+
+
+def eval_mocked_response(env: Environment, mocked_response: MockedResponse) -> None:
+ if isinstance(mocked_response, MockedResponseReturn):
+ _eval_mocked_response_return(env=env, mocked_response=mocked_response)
+ elif isinstance(mocked_response, MockedResponseThrow):
+ _eval_mocked_response_throw(env=env, mocked_response=mocked_response)
+ else:
+ raise RuntimeError(f"Invalid MockedResponse type '{type(mocked_response)}'")
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py
index b30c9c0e1e927..c385368c25dc2 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py
@@ -33,6 +33,9 @@
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import (
StateCredentials,
)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.mock_eval_utils import (
+ eval_mocked_response,
+)
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import (
ResourceRuntimePart,
ServiceResource,
@@ -44,6 +47,7 @@
from localstack.services.stepfunctions.asl.eval.environment import Environment
from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+from localstack.services.stepfunctions.mocking.mock_config import MockedResponse
from localstack.services.stepfunctions.quotas import is_within_size_quota
from localstack.utils.strings import camel_to_snake_case, snake_to_camel_case, to_bytes, to_str
@@ -352,12 +356,16 @@ def _eval_execution(self, env: Environment) -> None:
normalised_parameters = copy.deepcopy(raw_parameters)
self._normalise_parameters(normalised_parameters)
- self._eval_service_task(
- env=env,
- resource_runtime_part=resource_runtime_part,
- normalised_parameters=normalised_parameters,
- state_credentials=state_credentials,
- )
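+ # when a mock configuration covers this state, evaluate the configured mocked response instead of calling the real service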
+ if env.is_mocked_mode():
+ mocked_response: MockedResponse = env.get_current_mocked_response()
+ eval_mocked_response(env=env, mocked_response=mocked_response)
+ else:
+ self._eval_service_task(
+ env=env,
+ resource_runtime_part=resource_runtime_part,
+ normalised_parameters=normalised_parameters,
+ state_credentials=state_credentials,
+ )
output_value = env.stack[-1]
self._normalise_response(output_value)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py
index 31c0e97dd9af5..bed6e8b78fdd5 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py
@@ -328,6 +328,9 @@ def _after_eval_execution(
normalised_parameters: dict,
state_credentials: StateCredentials,
) -> None:
+ # TODO: In Mock mode, when simulating a failure, the mock response is handled by
+ # super()._eval_execution, so this block is never executed. Consequently, the
+ # "TaskSubmitted" event isn't recorded in the event history.
if self._is_integration_pattern():
output = env.stack[-1]
env.event_manager.add_event(
@@ -342,13 +345,13 @@ def _after_eval_execution(
)
),
)
- self._eval_integration_pattern(
- env=env,
- resource_runtime_part=resource_runtime_part,
- normalised_parameters=normalised_parameters,
- state_credentials=state_credentials,
- )
-
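+ # in mock mode the mocked response already represents the integration's final result,
+ # so skip waiting for the callback/sync integration pattern to complete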
+ if not env.is_mocked_mode():
+ self._eval_integration_pattern(
+ env=env,
+ resource_runtime_part=resource_runtime_part,
+ normalised_parameters=normalised_parameters,
+ state_credentials=state_credentials,
+ )
super()._after_eval_execution(
env=env,
resource_runtime_part=resource_runtime_part,
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py
index 405dcf595d799..8feebfa1cdc29 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py
@@ -124,7 +124,7 @@ def _eval_service_task(
normalised_parameters: dict,
state_credentials: StateCredentials,
):
- lambda_eval_utils.exec_lambda_function(
+ lambda_eval_utils.execute_lambda_function_integration(
env=env,
parameters=normalised_parameters,
region=resource_runtime_part.region,
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py
index a6a9dbe0c78d3..d33fc290b611e 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py
@@ -164,7 +164,7 @@ def _eval_execution(self, env: Environment) -> None:
resource_runtime_part: ResourceRuntimePart = env.stack.pop()
parameters["Payload"] = lambda_eval_utils.to_payload_type(parameters["Payload"])
- lambda_eval_utils.exec_lambda_function(
+ lambda_eval_utils.execute_lambda_function_integration(
env=env,
parameters=parameters,
region=resource_runtime_part.region,
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py b/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py
index c397ce86ba300..ecb90be5b8d07 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py
@@ -34,6 +34,7 @@
from localstack.services.stepfunctions.asl.eval.states import ContextObjectData, States
from localstack.services.stepfunctions.asl.eval.variable_store import VariableStore
from localstack.services.stepfunctions.backend.activity import Activity
+from localstack.services.stepfunctions.mocking.mock_config import (
+ MockedResponse,
+ MockTestCase,
+ StateMockedResponses,
+)
LOG = logging.getLogger(__name__)
@@ -51,6 +52,7 @@ class Environment:
callback_pool_manager: CallbackPoolManager
map_run_record_pool_manager: MapRunRecordPoolManager
activity_store: Final[dict[Arn, Activity]]
+ mock_test_case: Optional[MockTestCase] = None
_frames: Final[list[Environment]]
_is_frame: bool = False
@@ -69,6 +71,7 @@ def __init__(
cloud_watch_logging_session: Optional[CloudWatchLoggingSession],
activity_store: dict[Arn, Activity],
variable_store: Optional[VariableStore] = None,
+ mock_test_case: Optional[MockTestCase] = None,
):
super(Environment, self).__init__()
self._state_mutex = threading.RLock()
@@ -86,6 +89,8 @@ def __init__(
self.activity_store = activity_store
+ self.mock_test_case = mock_test_case
+
self._frames = list()
self._is_frame = False
@@ -133,6 +138,7 @@ def as_inner_frame_of(
cloud_watch_logging_session=env.cloud_watch_logging_session,
activity_store=env.activity_store,
variable_store=variable_store,
+ mock_test_case=env.mock_test_case,
)
frame._is_frame = True
frame.event_manager = env.event_manager
@@ -262,3 +268,33 @@ def is_frame(self) -> bool:
def is_standard_workflow(self) -> bool:
return self.execution_type == StateMachineType.STANDARD
+
+ def is_mocked_mode(self) -> bool:
+ """
+ Returns True if the state machine is running in mock mode and the current
+ state has a defined mock configuration in the target environment or frame;
+ otherwise, returns False.
+ """
+ return (
+ self.mock_test_case is not None
+ and self.next_state_name in self.mock_test_case.state_mocked_responses
+ )
+
+ def get_current_mocked_response(self) -> MockedResponse:
+ if not self.is_mocked_mode():
+ raise RuntimeError(
+ "Cannot retrieve mocked response: execution is not operating in mocked mode"
+ )
+ state_name = self.next_state_name
+ state_mocked_responses: Optional[StateMockedResponses] = (
+ self.mock_test_case.state_mocked_responses.get(state_name)
+ )
+ if state_mocked_responses is None:
+ raise RuntimeError(f"No mocked response definition for state '{state_name}'")
+ retry_count = self.states.context_object.context_object_data["State"]["RetryCount"]
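+ # mocked_responses holds one entry per attempt, so the current retry count
+ # selects the response for this attempt (0 = first invocation)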
+ if len(state_mocked_responses.mocked_responses) <= retry_count:
+ raise RuntimeError(
+ f"No mocked response definition for state '{state_name}' "
+ f"and retry number '{retry_count}'"
+ )
+ return state_mocked_responses.mocked_responses[retry_count]
diff --git a/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py b/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py
index 5345d53a225cc..2447458683daf 100644
--- a/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py
@@ -7,6 +7,7 @@
from localstack.services.events.utils import to_json_str
_PATTERN_SINGLETON_ARRAY_ACCESS_OUTPUT: Final[str] = r"\[\d+\]$"
+_PATTERN_SLICE_OR_WILDCARD_ACCESS: Final[str] = r"\$(?:\.[^[]+\[(?:\*|\d*:\d*)\]|\[\*\])(?:\.[^[]+)*$"
def _is_singleton_array_access(path: str) -> bool:
@@ -14,6 +15,12 @@ def _is_singleton_array_access(path: str) -> bool:
return bool(re.search(_PATTERN_SINGLETON_ARRAY_ACCESS_OUTPUT, path))
+def _contains_slice_or_wildcard_array(path: str) -> bool:
+ # Returns True if the JSON path contains an array slice or wildcard access.
+ # Slices at the root are discarded, but a wildcard at the root is allowed.
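+ # e.g. "$.items[*].id" and "$.items[1:3]" match; "$[*]" matches, but "$[0:2]" does not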
+ return bool(re.search(_PATTERN_SLICE_OR_WILDCARD_ACCESS, path))
+
+
class NoSuchJsonPathError(Exception):
json_path: Final[str]
data: Final[Any]
@@ -42,6 +49,8 @@ def extract_json(path: str, data: Any) -> Any:
matches = input_expr.find(data)
if not matches:
+ if _contains_slice_or_wildcard_array(path):
+ return []
raise NoSuchJsonPathError(json_path=path, data=data)
if len(matches) > 1 or isinstance(matches[0].path, Index):
diff --git a/localstack-core/localstack/services/stepfunctions/backend/alias.py b/localstack-core/localstack/services/stepfunctions/backend/alias.py
index f6c4995bc7df8..155890abf4cb3 100644
--- a/localstack-core/localstack/services/stepfunctions/backend/alias.py
+++ b/localstack-core/localstack/services/stepfunctions/backend/alias.py
@@ -11,9 +11,11 @@
Arn,
CharacterRestrictedName,
DescribeStateMachineAliasOutput,
+ PageToken,
RoutingConfigurationList,
StateMachineAliasListItem,
)
+from localstack.utils.strings import token_generator
class Alias:
@@ -25,6 +27,7 @@ class Alias:
_state_machine_version_arns: list[Arn]
_execution_probability_distribution: list[int]
state_machine_alias_arn: Final[Arn]
+ tokenized_state_machine_alias_arn: Final[PageToken]
create_date: datetime.datetime
def __init__(
@@ -39,6 +42,7 @@ def __init__(
self.name = name
self._description = None
self.state_machine_alias_arn = f"{state_machine_arn}:{name}"
+ self.tokenized_state_machine_alias_arn = token_generator(self.state_machine_alias_arn)
self.update(description=description, routing_configuration_list=routing_configuration_list)
self.create_date = self._get_mutex_date()
diff --git a/localstack-core/localstack/services/stepfunctions/backend/execution.py b/localstack-core/localstack/services/stepfunctions/backend/execution.py
index 5f3c5aeba87d3..76090c7981944 100644
--- a/localstack-core/localstack/services/stepfunctions/backend/execution.py
+++ b/localstack-core/localstack/services/stepfunctions/backend/execution.py
@@ -59,6 +59,7 @@
StateMachineInstance,
StateMachineVersion,
)
+from localstack.services.stepfunctions.mocking.mock_config import MockTestCase
LOG = logging.getLogger(__name__)
@@ -107,6 +108,8 @@ class Execution:
state_machine_version_arn: Final[Optional[Arn]]
state_machine_alias_arn: Final[Optional[Arn]]
+ mock_test_case: Final[Optional[MockTestCase]]
+
start_date: Final[Timestamp]
input_data: Final[Optional[json]]
input_details: Final[Optional[CloudWatchEventsExecutionDataDetails]]
@@ -141,6 +144,7 @@ def __init__(
input_data: Optional[json] = None,
trace_header: Optional[TraceHeader] = None,
state_machine_alias_arn: Optional[Arn] = None,
+ mock_test_case: Optional[MockTestCase] = None,
):
self.name = name
self.sm_type = sm_type
@@ -169,6 +173,7 @@ def __init__(
self.error = None
self.cause = None
self._activity_store = activity_store
+ self.mock_test_case = mock_test_case
def _get_events_client(self):
return connect_to(aws_access_key_id=self.account_id, region_name=self.region_name).events
@@ -301,6 +306,7 @@ def _get_start_execution_worker(self) -> ExecutionWorker:
exec_comm=self._get_start_execution_worker_comm(),
cloud_watch_logging_session=self._cloud_watch_logging_session,
activity_store=self._activity_store,
+ mock_test_case=self.mock_test_case,
)
def start(self) -> None:
diff --git a/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py b/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py
index 86284dce13a84..c2d14c2085295 100644
--- a/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py
+++ b/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py
@@ -29,6 +29,7 @@
from localstack.services.stepfunctions.backend.execution_worker_comm import (
ExecutionWorkerCommunication,
)
+from localstack.services.stepfunctions.mocking.mock_config import MockTestCase
from localstack.utils.common import TMP_THREADS
@@ -36,6 +37,7 @@ class ExecutionWorker:
_evaluation_details: Final[EvaluationDetails]
_execution_communication: Final[ExecutionWorkerCommunication]
_cloud_watch_logging_session: Final[Optional[CloudWatchLoggingSession]]
+ _mock_test_case: Final[Optional[MockTestCase]]
_activity_store: dict[Arn, Activity]
env: Optional[Environment]
@@ -46,10 +48,12 @@ def __init__(
exec_comm: ExecutionWorkerCommunication,
cloud_watch_logging_session: Optional[CloudWatchLoggingSession],
activity_store: dict[Arn, Activity],
+ mock_test_case: Optional[MockTestCase] = None,
):
self._evaluation_details = evaluation_details
self._execution_communication = exec_comm
self._cloud_watch_logging_session = cloud_watch_logging_session
+ self._mock_test_case = mock_test_case
self._activity_store = activity_store
self.env = None
@@ -78,6 +82,7 @@ def _get_evaluation_environment(self) -> Environment:
event_history_context=EventHistoryContext.of_program_start(),
cloud_watch_logging_session=self._cloud_watch_logging_session,
activity_store=self._activity_store,
+ mock_test_case=self._mock_test_case,
)
def _execution_logic(self):
diff --git a/localstack-core/localstack/services/stepfunctions/mocking/__init__.py b/localstack-core/localstack/services/stepfunctions/mocking/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/mocking/mock_config.py b/localstack-core/localstack/services/stepfunctions/mocking/mock_config.py
new file mode 100644
index 0000000000000..25f71acee35d5
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/mocking/mock_config.py
@@ -0,0 +1,214 @@
+import abc
+from typing import Any, Final, Optional
+
+from localstack.services.stepfunctions.mocking.mock_config_file import (
+ RawMockConfig,
+ RawResponseModel,
+ RawTestCase,
+ _load_sfn_raw_mock_config,
+)
+
+
+class MockedResponse(abc.ABC):
+ range_start: Final[int]
+ range_end: Final[int]
+
+ def __init__(self, range_start: int, range_end: int):
+ super().__init__()
+ if range_start < 0 or range_end < 0:
+ raise ValueError(
+ f"Invalid range: both '{range_start}' and '{range_end}' must be non-negative integers."
+ )
+ if range_end < range_start:
+ raise ValueError(
+ f"Invalid range: '{range_end}' must be greater than or equal to '{range_start}'."
+ )
+ self.range_start = range_start
+ self.range_end = range_end
+
+
+class MockedResponseReturn(MockedResponse):
+ payload: Final[Any]
+
+ def __init__(self, range_start: int, range_end: int, payload: Any):
+ super().__init__(range_start=range_start, range_end=range_end)
+ self.payload = payload
+
+
+class MockedResponseThrow(MockedResponse):
+ error: Final[str]
+ cause: Final[str]
+
+ def __init__(self, range_start: int, range_end: int, error: str, cause: str):
+ super().__init__(range_start=range_start, range_end=range_end)
+ self.error = error
+ self.cause = cause
+
+
+class StateMockedResponses:
+ state_name: Final[str]
+ mocked_response_name: Final[str]
+ mocked_responses: Final[list[MockedResponse]]
+
+ def __init__(
+ self, state_name: str, mocked_response_name: str, mocked_responses: list[MockedResponse]
+ ):
+ self.state_name = state_name
+ self.mocked_response_name = mocked_response_name
+ self.mocked_responses = list()
+ last_range_end: int = -1
+ mocked_responses_sorted = sorted(mocked_responses, key=lambda mr: mr.range_start)
+ for mocked_response in mocked_responses_sorted:
+ if mocked_response.range_start - last_range_end != 1:
+ raise RuntimeError(
+ f"Inconsistent event numbering detected for state '{state_name}': "
+ f"the previous mocked response ended at event '{last_range_end}' "
+ f"while the next response '{mocked_response_name}' "
+ f"starts at event '{mocked_response.range_start}'. "
+ "Mock responses must be consecutively numbered. "
+ f"Expected the next response to begin at event {last_range_end + 1}."
+ )
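+ # expand the range into one entry per attempt: a response for range 1-3 is served for attempts 1, 2 and 3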
+ repeats = mocked_response.range_end - mocked_response.range_start + 1
+ self.mocked_responses.extend([mocked_response] * repeats)
+ last_range_end = mocked_response.range_end
+
+
+class MockTestCase:
+ state_machine_name: Final[str]
+ test_case_name: Final[str]
+ state_mocked_responses: Final[dict[str, StateMockedResponses]]
+
+ def __init__(
+ self,
+ state_machine_name: str,
+ test_case_name: str,
+ state_mocked_responses_list: list[StateMockedResponses],
+ ):
+ self.state_machine_name = state_machine_name
+ self.test_case_name = test_case_name
+ self.state_mocked_responses = dict()
+ for state_mocked_response in state_mocked_responses_list:
+ state_name = state_mocked_response.state_name
+ if state_name in self.state_mocked_responses:
+ raise RuntimeError(
+ f"Duplicate definition of state '{state_name}' for test case '{test_case_name}'"
+ )
+ self.state_mocked_responses[state_name] = state_mocked_response
+
+
+def _parse_mocked_response_range(string_definition: str) -> tuple[int, int]:
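+ # e.g. "0" -> (0, 0); "1-2" -> (1, 2)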
+ definition_parts = string_definition.strip().split("-")
+ if len(definition_parts) == 1:
+ range_part = definition_parts[0]
+ try:
+ range_value = int(range_part)
+ return range_value, range_value
+ except Exception:
+ raise RuntimeError(
+ f"Unknown mocked response retry range value '{range_part}', not a valid integer"
+ )
+ elif len(definition_parts) == 2:
+ range_part_start = definition_parts[0]
+ range_part_end = definition_parts[1]
+ try:
+ return int(range_part_start), int(range_part_end)
+ except Exception:
+ raise RuntimeError(
+ f"Unknown mocked response retry range value '{range_part_start}:{range_part_end}', "
+ "not valid integer values"
+ )
+ else:
+ raise RuntimeError(
+ f"Unknown mocked response retry range definition '{string_definition}', "
+ "range definition should consist of one integer (e.g. '0'), or a integer range (e.g. '1-2')'."
+ )
+
+
+def _mocked_response_from_raw(
+ raw_response_model_range: str, raw_response_model: RawResponseModel
+) -> MockedResponse:
+ range_start, range_end = _parse_mocked_response_range(raw_response_model_range)
+ if raw_response_model.Return:
+ payload = raw_response_model.Return.model_dump()
+ return MockedResponseReturn(range_start=range_start, range_end=range_end, payload=payload)
+ throw_definition = raw_response_model.Throw
+ return MockedResponseThrow(
+ range_start=range_start,
+ range_end=range_end,
+ error=throw_definition.Error,
+ cause=throw_definition.Cause,
+ )
+
+
+def _mocked_responses_from_raw(
+ mocked_response_name: str, raw_mock_config: RawMockConfig
+) -> list[MockedResponse]:
+ raw_response_models: Optional[dict[str, RawResponseModel]] = (
+ raw_mock_config.MockedResponses.get(mocked_response_name)
+ )
+ if not raw_response_models:
+ raise RuntimeError(
+ f"No definitions for mocked response '{mocked_response_name}' in the mock configuration file."
+ )
+ mocked_responses: list[MockedResponse] = list()
+ for raw_response_model_range, raw_response_model in raw_response_models.items():
+ mocked_response: MockedResponse = _mocked_response_from_raw(
+ raw_response_model_range=raw_response_model_range, raw_response_model=raw_response_model
+ )
+ mocked_responses.append(mocked_response)
+ return mocked_responses
+
+
+def _state_mocked_responses_from_raw(
+ state_name: str, mocked_response_name: str, raw_mock_config: RawMockConfig
+) -> StateMockedResponses:
+ mocked_responses = _mocked_responses_from_raw(
+ mocked_response_name=mocked_response_name, raw_mock_config=raw_mock_config
+ )
+ return StateMockedResponses(
+ state_name=state_name,
+ mocked_response_name=mocked_response_name,
+ mocked_responses=mocked_responses,
+ )
+
+
+def _mock_test_case_from_raw(
+ state_machine_name: str, test_case_name: str, raw_mock_config: RawMockConfig
+) -> MockTestCase:
+ state_machine = raw_mock_config.StateMachines.get(state_machine_name)
+ if not state_machine:
+ raise RuntimeError(
+ f"No definitions for state machine '{state_machine_name}' in the mock configuration file."
+ )
+ test_case: RawTestCase = state_machine.TestCases.get(test_case_name)
+ if not test_case:
+ raise RuntimeError(
+ f"No definitions for test case '{test_case_name}' and "
+ f"state machine '{state_machine_name}' in the mock configuration file."
+ )
+ state_mocked_responses_list: list[StateMockedResponses] = list()
+ for state_name, mocked_response_name in test_case.root.items():
+ state_mocked_responses = _state_mocked_responses_from_raw(
+ state_name=state_name,
+ mocked_response_name=mocked_response_name,
+ raw_mock_config=raw_mock_config,
+ )
+ state_mocked_responses_list.append(state_mocked_responses)
+ return MockTestCase(
+ state_machine_name=state_machine_name,
+ test_case_name=test_case_name,
+ state_mocked_responses_list=state_mocked_responses_list,
+ )
+
+
+def load_mock_test_case_for(state_machine_name: str, test_case_name: str) -> Optional[MockTestCase]:
+ raw_mock_config: Optional[RawMockConfig] = _load_sfn_raw_mock_config()
+ if raw_mock_config is None:
+ return None
+ mock_test_case: MockTestCase = _mock_test_case_from_raw(
+ state_machine_name=state_machine_name,
+ test_case_name=test_case_name,
+ raw_mock_config=raw_mock_config,
+ )
+ return mock_test_case
diff --git a/localstack-core/localstack/services/stepfunctions/mocking/mock_config_file.py b/localstack-core/localstack/services/stepfunctions/mocking/mock_config_file.py
new file mode 100644
index 0000000000000..145ffd20750a2
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/mocking/mock_config_file.py
@@ -0,0 +1,187 @@
+import logging
+import os
+from functools import lru_cache
+from json import JSONDecodeError
+from typing import Any, Dict, Final, Optional
+
+from pydantic import BaseModel, RootModel, ValidationError, model_validator
+
+from localstack import config
+
+LOG = logging.getLogger(__name__)
+
+_RETURN_KEY: Final[str] = "Return"
+_THROW_KEY: Final[str] = "Throw"
+
+
+class RawReturnResponse(RootModel[Any]):
+ """
+ Represents a return response.
+ Accepts any fields.
+ """
+
+ model_config = {"frozen": True}
+
+
+class RawThrowResponse(BaseModel):
+ """
+ Represents an error response.
+ Both 'Error' and 'Cause' are required.
+ """
+
+ model_config = {"frozen": True}
+
+ Error: str
+ Cause: str
+
+
+class RawResponseModel(BaseModel):
+ """
+ A response step must include exactly one of:
+ - 'Return': a ReturnResponse object.
+ - 'Throw': a ThrowResponse object.
+ """
+
+ model_config = {"frozen": True}
+
+ Return: Optional[RawReturnResponse] = None
+ Throw: Optional[RawThrowResponse] = None
+
+ @model_validator(mode="before")
+ def validate_response(cls, data: dict) -> dict:
+ if _RETURN_KEY in data and _THROW_KEY in data:
+ raise ValueError(f"Response cannot contain both '{_RETURN_KEY}' and '{_THROW_KEY}'")
+ if _RETURN_KEY not in data and _THROW_KEY not in data:
+ raise ValueError(f"Response must contain one of '{_RETURN_KEY}' or '{_THROW_KEY}'")
+ return data
+
+
+class RawTestCase(RootModel[Dict[str, str]]):
+ """
+ Represents an individual test case.
+ The keys are state names (e.g., 'LambdaState', 'SQSState')
+ and the values are the names of the mocked response configurations.
+ """
+
+ model_config = {"frozen": True}
+
+
+class RawStateMachine(BaseModel):
+ """
+ Represents a state machine configuration containing multiple test cases.
+ """
+
+ model_config = {"frozen": True}
+
+ TestCases: Dict[str, RawTestCase]
+
+
+class RawMockConfig(BaseModel):
+ """
+ The root configuration that contains:
+ - StateMachines: mapping state machine names to their configuration.
+ - MockedResponses: mapping response configuration names to response steps.
+ Each response step is keyed (e.g. "0", "1-2") and maps to a ResponseModel.
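+
+ A minimal example file (hypothetical names):
+ {
+ "StateMachines": {"LambdaSQSIntegration":
+ {"TestCases": {"HappyPath": {"LambdaState": "MockedLambdaSuccess"}}}},
+ "MockedResponses": {"MockedLambdaSuccess":
+ {"0": {"Return": {"StatusCode": 200}}}}
+ }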
+ """
+
+ model_config = {"frozen": True}
+
+ StateMachines: Dict[str, RawStateMachine]
+ MockedResponses: Dict[str, Dict[str, RawResponseModel]]
+
+
+@lru_cache(maxsize=1)
+def _read_sfn_raw_mock_config(file_path: str, modified_epoch: int) -> Optional[RawMockConfig]: # noqa
+ """
+ Load and cache the Step Functions mock configuration from a JSON file.
+
+ This function is memoized using `functools.lru_cache` to avoid re-reading the file
+ from disk unless it has changed. The `modified_epoch` parameter is used solely to
+ trigger cache invalidation when the file is updated. If either the file path or the
+ modified timestamp changes, the cached result is discarded and the file is reloaded.
+
+ Parameters:
+ file_path (str):
+ The absolute path to the JSON configuration file.
+
+ modified_epoch (int):
+ The last modified time of the file, in epoch seconds. This value is used
+ as part of the cache key to ensure the cache is refreshed when the file is updated.
+
+ Returns:
+ Optional[RawMockConfig]:
+ The parsed and validated mock configuration object if the file is successfully
+ loaded, or `None` if an error occurs during reading, validation, or parsing.
+
+ Notes:
+ - The `modified_epoch` argument is not used inside the function logic, but is
+ necessary to ensure cache correctness via `lru_cache`.
+ - Logging is used to capture warnings if file access or parsing fails.
+ """
+ try:
+ with open(file_path, "r") as df:
+ mock_config_str = df.read()
+ mock_config: RawMockConfig = RawMockConfig.model_validate_json(mock_config_str)
+ return mock_config
+ except (OSError, IOError) as file_error:
+ LOG.error("Failed to open mock configuration file '%s'. Error: %s", file_path, file_error)
+ return None
+ except ValidationError as validation_error:
+ errors = validation_error.errors()
+ if not errors:
+ # No detailed errors provided by Pydantic
+ LOG.error(
+ "Validation failed for mock configuration file at '%s'. "
+ "The file must contain a valid mock configuration.",
+ file_path,
+ )
+ else:
+ for err in errors:
+ location = ".".join(str(loc) for loc in err["loc"])
+ message = err["msg"]
+ error_type = err["type"]
+ LOG.error(
+ "Mock configuration file error at '%s': %s (%s)",
+ location,
+ message,
+ error_type,
+ )
+ # TODO: add tests to ensure the hot-reloading of the mock configuration
+ # file works as expected, and inform the user with the info below:
+ # LOG.info(
+ # "Changes to the mock configuration file will be applied at the "
+ # "next mock execution without requiring a LocalStack restart."
+ # )
+ return None
+ except JSONDecodeError as json_error:
+ LOG.error(
+ "Malformed JSON in mock configuration file at '%s'. Error: %s",
+ file_path,
+ json_error,
+ )
+ # TODO: add tests to ensure the hot-reloading of the mock configuration
+ # file works as expected, and inform the user with the info below:
+ # LOG.info(
+ # "Changes to the mock configuration file will be applied at the "
+ # "next mock execution without requiring a LocalStack restart."
+ # )
+ return None
+
+
+def _load_sfn_raw_mock_config() -> Optional[RawMockConfig]:
+ configuration_file_path = config.SFN_MOCK_CONFIG
+ if not configuration_file_path:
+ return None
+
+ try:
+ modified_time = int(os.path.getmtime(configuration_file_path))
+ except Exception as ex:
+ LOG.warning(
+ "Unable to access the step functions mock configuration file at '%s' due to %s",
+ configuration_file_path,
+ ex,
+ )
+ return None
+
+ mock_config = _read_sfn_raw_mock_config(configuration_file_path, modified_time)
+ return mock_config
diff --git a/localstack-core/localstack/services/stepfunctions/provider.py b/localstack-core/localstack/services/stepfunctions/provider.py
index 40f7bbb6e4483..c43fd396c9a8f 100644
--- a/localstack-core/localstack/services/stepfunctions/provider.py
+++ b/localstack-core/localstack/services/stepfunctions/provider.py
@@ -150,6 +150,10 @@
from localstack.services.stepfunctions.backend.test_state.execution import (
TestStateExecution,
)
+from localstack.services.stepfunctions.mocking.mock_config import (
+ MockTestCase,
+ load_mock_test_case_for,
+)
from localstack.services.stepfunctions.stepfunctions_utils import (
assert_pagination_parameters_valid,
get_next_page_token_from_arn,
@@ -180,7 +184,7 @@ def accept_state_visitor(self, visitor: StateVisitor):
visitor.visit(sfn_stores)
_STATE_MACHINE_ARN_REGEX: Final[re.Pattern] = re.compile(
- rf"{ARN_PARTITION_REGEX}:states:[a-z0-9-]+:[0-9]{{12}}:stateMachine:[a-zA-Z0-9-_.]+(:\d+)?(:[a-zA-Z0-9-_.]+)*$"
+ rf"{ARN_PARTITION_REGEX}:states:[a-z0-9-]+:[0-9]{{12}}:stateMachine:[a-zA-Z0-9-_.]+(:\d+)?(:[a-zA-Z0-9-_.]+)*(?:#[a-zA-Z0-9-_]+)?$"
)
_STATE_MACHINE_EXECUTION_ARN_REGEX: Final[re.Pattern] = re.compile(
@@ -779,6 +783,12 @@ def start_execution(
) -> StartExecutionOutput:
self._validate_state_machine_arn(state_machine_arn)
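+ # The ARN may carry a "#<test case name>" suffix selecting a mock test case
+ # from the mock configuration file, e.g. (hypothetical)
+ # "arn:aws:states:us-east-1:111111111111:stateMachine:sm#HappyPath".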
+ state_machine_arn_parts = state_machine_arn.split("#")
+ state_machine_arn = state_machine_arn_parts[0]
+ mock_test_case_name = (
+ state_machine_arn_parts[1] if len(state_machine_arn_parts) == 2 else None
+ )
+
store = self.get_store(context=context)
alias: Optional[Alias] = store.aliases.get(state_machine_arn)
@@ -832,6 +842,20 @@ def start_execution(
configuration=state_machine_clone.cloud_watch_logging_configuration,
)
+ mock_test_case: Optional[MockTestCase] = None
+ if mock_test_case_name is not None:
+ state_machine_name = state_machine_clone.name
+ mock_test_case = load_mock_test_case_for(
+ state_machine_name=state_machine_name, test_case_name=mock_test_case_name
+ )
+ if mock_test_case is None:
+ raise InvalidName(
+ f"Invalid mock test case name '{mock_test_case_name}' "
+ f"for state machine '{state_machine_name}'."
+ "Either the test case is not defined or the mock configuration file "
+ "could not be loaded. See logs for details."
+ )
+
execution = Execution(
name=exec_name,
sm_type=state_machine_clone.sm_type,
@@ -846,6 +870,7 @@ def start_execution(
input_data=input_data,
trace_header=trace_header,
activity_store=self.get_store(context).activities,
+ mock_test_case=mock_test_case,
)
store.executions[exec_arn] = execution
@@ -1057,7 +1082,8 @@ def list_state_machine_aliases(
max_results: PageSize = None,
**kwargs,
) -> ListStateMachineAliasesOutput:
- # TODO: add pagination support.
+ assert_pagination_parameters_valid(max_results, next_token)
+
self._validate_state_machine_arn(state_machine_arn)
state_machines = self.get_store(context).state_machines
state_machine_revision = state_machines.get(state_machine_arn)
@@ -1065,11 +1091,31 @@ def list_state_machine_aliases(
raise InvalidArn(f"Invalid arn: {state_machine_arn}")
state_machine_aliases: StateMachineAliasList = list()
+ valid_token_found = next_token is None
+
for alias in state_machine_revision.aliases:
state_machine_aliases.append(alias.to_item())
+ if alias.tokenized_state_machine_alias_arn == next_token:
+ valid_token_found = True
+
+ if not valid_token_found:
+ raise InvalidToken("Invalid Token: 'Invalid token'")
+
state_machine_aliases.sort(key=lambda item: item["creationDate"])
- return ListStateMachineAliasesOutput(stateMachineAliases=state_machine_aliases)
+ paginated_list = PaginatedList(state_machine_aliases)
+
+ paginated_aliases, next_token = paginated_list.get_page(
+ token_generator=lambda item: get_next_page_token_from_arn(
+ item.get("stateMachineAliasArn")
+ ),
+ next_token=next_token,
+ page_size=100 if max_results == 0 or max_results is None else max_results,
+ )
+
+ return ListStateMachineAliasesOutput(
+ stateMachineAliases=paginated_aliases, nextToken=next_token
+ )
def list_state_machine_versions(
self,
diff --git a/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py b/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py
index a331f44efcd1c..95133b4ed47e8 100644
--- a/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py
+++ b/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py
@@ -46,7 +46,7 @@ def assert_pagination_parameters_valid(
next_token: str,
next_token_length_limit: int = 1024,
max_results_upper_limit: int = 1000,
-) -> tuple[int, str]:
+) -> None:
validation_errors = []
match max_results:
diff --git a/localstack-core/localstack/services/sts/models.py b/localstack-core/localstack/services/sts/models.py
index 7d4d6020b0467..67a8665dbb76f 100644
--- a/localstack-core/localstack/services/sts/models.py
+++ b/localstack-core/localstack/services/sts/models.py
@@ -1,9 +1,19 @@
+from typing import TypedDict
+
+from localstack.aws.api.sts import Tag
from localstack.services.stores import AccountRegionBundle, BaseStore, CrossRegionAttribute
+class SessionTaggingConfig(TypedDict):
+ # => {"Key": , "Value": }
+ tags: dict[str, Tag]
+ # list of lowercase transitive tag keys
+ transitive_tags: list[str]
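+ # e.g. (hypothetical):
+ # {"tags": {"team": {"Key": "Team", "Value": "core"}}, "transitive_tags": ["team"]}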
+
+
class STSStore(BaseStore):
- # maps access key ids to tags for the session they belong to
- session_tags: dict[str, dict[str, str]] = CrossRegionAttribute(default=dict)
+ # maps access key ids to tagging config for the session they belong to
+ session_tags: dict[str, SessionTaggingConfig] = CrossRegionAttribute(default=dict)
sts_stores = AccountRegionBundle("sts", STSStore)
diff --git a/localstack-core/localstack/services/sts/provider.py b/localstack-core/localstack/services/sts/provider.py
index 006a510a612ce..14807869ea9cb 100644
--- a/localstack-core/localstack/services/sts/provider.py
+++ b/localstack-core/localstack/services/sts/provider.py
@@ -1,6 +1,6 @@
import logging
-from localstack.aws.api import RequestContext
+from localstack.aws.api import RequestContext, ServiceException
from localstack.aws.api.sts import (
AssumeRoleResponse,
GetCallerIdentityResponse,
@@ -21,12 +21,19 @@
from localstack.services.iam.iam_patches import apply_iam_patches
from localstack.services.moto import call_moto
from localstack.services.plugins import ServiceLifecycleHook
-from localstack.services.sts.models import sts_stores
+from localstack.services.sts.models import SessionTaggingConfig, sts_stores
from localstack.utils.aws.arns import extract_account_id_from_arn
+from localstack.utils.aws.request_context import extract_access_key_id_from_auth_header
LOG = logging.getLogger(__name__)
+class InvalidParameterValueError(ServiceException):
+ code = "InvalidParameterValue"
+ status_code = 400
+ sender_fault = True
+
+
class StsProvider(StsApi, ServiceLifecycleHook):
def __init__(self):
apply_iam_patches()
@@ -54,15 +61,47 @@ def assume_role(
provided_contexts: ProvidedContextsListType = None,
**kwargs,
) -> AssumeRoleResponse:
- response: AssumeRoleResponse = call_moto(context)
+ target_account_id = extract_account_id_from_arn(role_arn)
+ access_key_id = extract_access_key_id_from_auth_header(context.request.headers)
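+ # IAM/STS are global services, so the store region is hardcoded to "us-east-1";
+ # this only differs for other partitions, which are not yet supported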
+ store = sts_stores[target_account_id]["us-east-1"]
+ existing_tagging_config = store.session_tags.get(access_key_id, {})
if tags:
- transformed_tags = {tag["Key"]: tag["Value"] for tag in tags}
- # we should save it in the store of the role account, not the requester
- account_id = extract_account_id_from_arn(role_arn)
- # the region is hardcoded to "us-east-1" as IAM/STS are global services
- # this will only differ for other partitions, which are not yet supported
- store = sts_stores[account_id]["us-east-1"]
+ tag_keys = {tag["Key"].lower() for tag in tags}
+ # if the lower-cased set is smaller than the number of keys, there have to be some duplicates.
+ if len(tag_keys) < len(tags):
+ raise InvalidParameterValueError(
+ "Duplicate tag keys found. Please note that Tag keys are case insensitive."
+ )
+
+ # prevent transitive tags from being overridden
+ if existing_tagging_config:
+ if set(existing_tagging_config["transitive_tags"]).intersection(tag_keys):
+ raise InvalidParameterValueError(
+ "One of the specified transitive tag keys can't be set because it conflicts with a transitive tag key from the calling session."
+ )
+ if transitive_tag_keys:
+ transitive_tag_key_set = {key.lower() for key in transitive_tag_keys}
+ if not transitive_tag_key_set <= tag_keys:
+ raise InvalidParameterValueError(
+ "The specified transitive tag key must be included in the requested tags."
+ )
+
+ response: AssumeRoleResponse = call_moto(context)
+
+ transitive_tag_keys = transitive_tag_keys or []
+ tags = tags or []
+ transformed_tags = {tag["Key"].lower(): tag for tag in tags}
+ # propagate transitive tags
+ if existing_tagging_config:
+ for tag in existing_tagging_config["transitive_tags"]:
+ transformed_tags[tag] = existing_tagging_config["tags"][tag]
+ transitive_tag_keys += existing_tagging_config["transitive_tags"]
+ if transformed_tags:
+ # store session tagging config
access_key_id = response["Credentials"]["AccessKeyId"]
- store.session_tags[access_key_id] = transformed_tags
+ store.session_tags[access_key_id] = SessionTaggingConfig(
+ tags=transformed_tags,
+ transitive_tags=[key.lower() for key in transitive_tag_keys],
+ )
return response
diff --git a/localstack-core/localstack/services/transcribe/provider.py b/localstack-core/localstack/services/transcribe/provider.py
index f33f3c4b1013a..79a9cea6d50b2 100644
--- a/localstack-core/localstack/services/transcribe/provider.py
+++ b/localstack-core/localstack/services/transcribe/provider.py
@@ -1,7 +1,6 @@
import datetime
import json
import logging
-import os
import threading
import wave
from functools import cache
@@ -44,6 +43,11 @@
from localstack.utils.run import run
from localstack.utils.threads import start_thread
+# Amazon Transcribe service calls are limited to four hours (or 2 GB) per API call for our batch service.
+# The streaming service can accommodate open connections up to four hours long.
+# See https://aws.amazon.com/transcribe/faqs/
+MAX_AUDIO_DURATION_SECONDS = 60 * 60 * 4
+
LOG = logging.getLogger(__name__)
VOSK_MODELS_URL = f"{HUGGING_FACE_ENDPOINT}/vosk-models/resolve/main/"
@@ -124,8 +128,6 @@ def _setup_vosk() -> None:
# Install and configure vosk
vosk_package.install()
- # Vosk must be imported only after setting the required env vars
- os.environ["VOSK_MODEL_PATH"] = str(LANGUAGE_MODEL_DIR)
from vosk import SetLogLevel # noqa
# Suppress Vosk logging
@@ -230,7 +232,7 @@ def delete_transcription_job(
#
@staticmethod
- def download_model(name: str):
+ def download_model(name: str) -> str:
"""
Download a Vosk language model to LocalStack cache directory. Do nothing if model is already downloaded.
@@ -240,8 +242,10 @@ def download_model(name: str):
model_path = LANGUAGE_MODEL_DIR / name
with _DL_LOCK:
- if model_path.exists():
- return
+ # check if model path exists and is not empty
+ if model_path.exists() and any(model_path.iterdir()):
+ LOG.debug("Using a pre-downloaded language model: %s", model_path)
+ return str(model_path)
else:
- model_path.mkdir(parents=True)
+ # the path may exist but be empty (checked above), so tolerate an existing dir
+ model_path.mkdir(parents=True, exist_ok=True)
@@ -267,6 +271,8 @@ def download_model(name: str):
Path(model_zip_path).unlink()
+ return str(model_path)
+
#
# Threads
#
@@ -304,6 +310,11 @@ def _run_transcription_job(self, args: Tuple[TranscribeStore, str]):
format = ffprobe_output["format"]["format_name"]
LOG.debug("Media format detected as: %s", format)
job["MediaFormat"] = SUPPORTED_FORMAT_NAMES[format]
+ duration = ffprobe_output["format"]["duration"]
+
+ if float(duration) >= MAX_AUDIO_DURATION_SECONDS:
+ failure_reason = "Invalid file size: file size too large. Maximum audio duration is 4.000000 hours.Check the length of the file and try your request again."
+ raise RuntimeError(failure_reason)
# Determine the sample rate of input audio if possible
for stream in ffprobe_output["streams"]:
@@ -338,10 +349,10 @@ def _run_transcription_job(self, args: Tuple[TranscribeStore, str]):
language_code = job["LanguageCode"]
model_name = LANGUAGE_MODELS[language_code]
self._setup_vosk()
- self.download_model(model_name)
+ model_path = self.download_model(model_name)
from vosk import KaldiRecognizer, Model # noqa
- model = Model(model_name=model_name)
+ model = Model(model_path=model_path, model_name=model_name)
tc = KaldiRecognizer(model, audio.getframerate())
tc.SetWords(True)
diff --git a/localstack-core/localstack/state/core.py b/localstack-core/localstack/state/core.py
index aa27a84fc843e..ae41f47b17469 100644
--- a/localstack-core/localstack/state/core.py
+++ b/localstack-core/localstack/state/core.py
@@ -27,27 +27,27 @@ class StateLifecycleHook:
- load: the state is injected into the service, or state directories on disk are restored
"""
- def on_before_state_reset(self):
+ def on_before_state_reset(self) -> None:
"""Hook triggered before the provider's state containers are reset/cleared."""
pass
- def on_after_state_reset(self):
+ def on_after_state_reset(self) -> None:
"""Hook triggered after the provider's state containers have been reset/cleared."""
pass
- def on_before_state_save(self):
+ def on_before_state_save(self) -> None:
"""Hook triggered before the provider's state containers are saved."""
pass
- def on_after_state_save(self):
+ def on_after_state_save(self) -> None:
"""Hook triggered after the provider's state containers have been saved."""
pass
- def on_before_state_load(self):
+ def on_before_state_load(self) -> None:
"""Hook triggered before a previously serialized state is loaded into the provider's state containers."""
pass
- def on_after_state_load(self):
+ def on_after_state_load(self) -> None:
"""Hook triggered after a previously serialized state has been loaded into the provider's state containers."""
pass
diff --git a/localstack-core/localstack/testing/pytest/cloudformation/__init__.py b/localstack-core/localstack/testing/pytest/cloudformation/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/testing/pytest/cloudformation/fixtures.py b/localstack-core/localstack/testing/pytest/cloudformation/fixtures.py
new file mode 100644
index 0000000000000..e2c42d38076ca
--- /dev/null
+++ b/localstack-core/localstack/testing/pytest/cloudformation/fixtures.py
@@ -0,0 +1,169 @@
+import json
+from collections import defaultdict
+from typing import Callable
+
+import pytest
+
+from localstack.aws.api.cloudformation import StackEvent
+from localstack.aws.connect import ServiceLevelClientFactory
+from localstack.utils.functions import call_safe
+from localstack.utils.strings import short_uid
+
+PerResourceStackEvents = dict[str, list[StackEvent]]
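+# maps logical resource IDs to the stack events emitted for that resource, e.g.
+# (hypothetical) {"MyBucket": [<CREATE_IN_PROGRESS event>, <CREATE_COMPLETE event>]}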
+
+
+@pytest.fixture
+def capture_per_resource_events(
+ aws_client: ServiceLevelClientFactory,
+) -> Callable[[str], PerResourceStackEvents]:
+ def capture(stack_name: str) -> PerResourceStackEvents:
+ events = aws_client.cloudformation.describe_stack_events(StackName=stack_name)[
+ "StackEvents"
+ ]
+ per_resource_events = defaultdict(list)
+ for event in events:
+ if logical_resource_id := event.get("LogicalResourceId"):
+ per_resource_events[logical_resource_id].append(event)
+ return per_resource_events
+
+ return capture
+
+
+@pytest.fixture
+def capture_update_process(aws_client_no_retry, cleanups, capture_per_resource_events):
+ """
+ Fixture to deploy a new stack (via creating and executing a change set), then update the
+ stack with a second template (via creating and executing a change set).
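+
+ The returned callable accepts the snapshot fixture, two templates `t1`/`t2`
+ (each a template dict or a path to a template file), and optional parameter
+ dicts `p1`/`p2` for the respective change sets.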
+ """
+
+ stack_name = f"stack-{short_uid()}"
+ change_set_name = f"cs-{short_uid()}"
+
+ def inner(
+ snapshot, t1: dict | str, t2: dict | str, p1: dict | None = None, p2: dict | None = None
+ ):
+ snapshot.add_transformer(snapshot.transform.cloudformation_api())
+
+ if isinstance(t1, dict):
+ t1 = json.dumps(t1)
+ elif isinstance(t1, str):
+ with open(t1) as infile:
+ t1 = infile.read()
+ if isinstance(t2, dict):
+ t2 = json.dumps(t2)
+ elif isinstance(t2, str):
+ with open(t2) as infile:
+ t2 = infile.read()
+
+ p1 = p1 or {}
+ p2 = p2 or {}
+
+ # deploy original stack
+ change_set_details = aws_client_no_retry.cloudformation.create_change_set(
+ StackName=stack_name,
+ ChangeSetName=change_set_name,
+ TemplateBody=t1,
+ ChangeSetType="CREATE",
+ Parameters=[{"ParameterKey": k, "ParameterValue": v} for (k, v) in p1.items()],
+ )
+ snapshot.match("create-change-set-1", change_set_details)
+ stack_id = change_set_details["StackId"]
+ change_set_id = change_set_details["Id"]
+ aws_client_no_retry.cloudformation.get_waiter("change_set_create_complete").wait(
+ ChangeSetName=change_set_id
+ )
+ cleanups.append(
+ lambda: call_safe(
+ aws_client_no_retry.cloudformation.delete_change_set,
+ kwargs=dict(ChangeSetName=change_set_id),
+ )
+ )
+
+ describe_change_set_with_prop_values = (
+ aws_client_no_retry.cloudformation.describe_change_set(
+ ChangeSetName=change_set_id, IncludePropertyValues=True
+ )
+ )
+ snapshot.match("describe-change-set-1-prop-values", describe_change_set_with_prop_values)
+ describe_change_set_without_prop_values = (
+ aws_client_no_retry.cloudformation.describe_change_set(
+ ChangeSetName=change_set_id, IncludePropertyValues=False
+ )
+ )
+ snapshot.match("describe-change-set-1", describe_change_set_without_prop_values)
+
+ execute_results = aws_client_no_retry.cloudformation.execute_change_set(
+ ChangeSetName=change_set_id
+ )
+ snapshot.match("execute-change-set-1", execute_results)
+ aws_client_no_retry.cloudformation.get_waiter("stack_create_complete").wait(
+ StackName=stack_id
+ )
+
+ # ensure stack deletion
+ cleanups.append(
+ lambda: call_safe(
+ aws_client_no_retry.cloudformation.delete_stack, kwargs=dict(StackName=stack_id)
+ )
+ )
+
+ describe = aws_client_no_retry.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][
+ 0
+ ]
+ snapshot.match("post-create-1-describe", describe)
+
+ # update stack
+ change_set_details = aws_client_no_retry.cloudformation.create_change_set(
+ StackName=stack_name,
+ ChangeSetName=change_set_name,
+ TemplateBody=t2,
+ ChangeSetType="UPDATE",
+ Parameters=[{"ParameterKey": k, "ParameterValue": v} for (k, v) in p2.items()],
+ )
+ snapshot.match("create-change-set-2", change_set_details)
+ stack_id = change_set_details["StackId"]
+ change_set_id = change_set_details["Id"]
+ aws_client_no_retry.cloudformation.get_waiter("change_set_create_complete").wait(
+ ChangeSetName=change_set_id
+ )
+
+ describe_change_set_with_prop_values = (
+ aws_client_no_retry.cloudformation.describe_change_set(
+ ChangeSetName=change_set_id, IncludePropertyValues=True
+ )
+ )
+ snapshot.match("describe-change-set-2-prop-values", describe_change_set_with_prop_values)
+ describe_change_set_without_prop_values = (
+ aws_client_no_retry.cloudformation.describe_change_set(
+ ChangeSetName=change_set_id, IncludePropertyValues=False
+ )
+ )
+ snapshot.match("describe-change-set-2", describe_change_set_without_prop_values)
+
+ execute_results = aws_client_no_retry.cloudformation.execute_change_set(
+ ChangeSetName=change_set_id
+ )
+ snapshot.match("execute-change-set-2", execute_results)
+ aws_client_no_retry.cloudformation.get_waiter("stack_update_complete").wait(
+ StackName=stack_id
+ )
+
+ describe = aws_client_no_retry.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][
+ 0
+ ]
+ snapshot.match("post-create-2-describe", describe)
+
+ events = capture_per_resource_events(stack_name)
+ snapshot.match("per-resource-events", events)
+
+ # delete stack
+ aws_client_no_retry.cloudformation.delete_stack(StackName=stack_id)
+ aws_client_no_retry.cloudformation.get_waiter("stack_delete_complete").wait(
+ StackName=stack_id
+ )
+ describe = aws_client_no_retry.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][
+ 0
+ ]
+ snapshot.match("delete-describe", describe)
+
+ yield inner
diff --git a/localstack-core/localstack/testing/pytest/fixtures.py b/localstack-core/localstack/testing/pytest/fixtures.py
index d526097aef1cb..a127e9a94aab5 100644
--- a/localstack-core/localstack/testing/pytest/fixtures.py
+++ b/localstack-core/localstack/testing/pytest/fixtures.py
@@ -67,6 +67,38 @@
from mypy_boto3_sqs.type_defs import MessageTypeDef
+@pytest.fixture(scope="session")
+def aws_client_no_retry(aws_client_factory):
+ """
+ This fixture can be used to obtain Boto clients with disabled retries for testing.
+ botocore docs: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#configuring-a-retry-mode
+
+ Use this client when testing exceptions (i.e., with pytest.raises(...)) or expected errors (e.g., status code 500)
+ to avoid unnecessary retries and mitigate test flakiness if the tested error condition is time-bound.
+
+ This client is needed for the following errors, exceptions, and HTTP status codes defined by the legacy retry mode:
+ https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#legacy-retry-mode
+ General socket/connection errors:
+ * ConnectionError
+ * ConnectionClosedError
+ * ReadTimeoutError
+ * EndpointConnectionError
+
+ Service-side throttling/limit errors and exceptions:
+ * Throttling
+ * ThrottlingException
+ * ThrottledException
+ * RequestThrottledException
+ * ProvisionedThroughputExceededException
+
+ HTTP status codes: 429, 500, 502, 503, 504, and 509
+
+ Hence, this client is not needed for a `ResourceNotFound` error (though using it does no harm).
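+
+ Usage sketch (hypothetical):
+ with pytest.raises(ClientError):
+ aws_client_no_retry.cloudformation.describe_stacks(StackName="does-not-exist")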
+ """
+ no_retry_config = botocore.config.Config(retries={"max_attempts": 1})
+ return aws_client_factory(config=no_retry_config)
+
+
@pytest.fixture(scope="class")
def aws_http_client_factory(aws_session):
"""
@@ -1941,6 +1973,30 @@ def factory(email_address: str) -> None:
aws_client.ses.delete_identity(Identity=identity)
+@pytest.fixture
+def setup_sender_email_address(ses_verify_identity):
+ """
+ If the test is running against AWS, the given email address is assumed to be
+ already verified and is passed through unchanged. Otherwise, a random email
+ address is generated and verified.
+ """
+
+ def inner(sender_email_address: Optional[str] = None) -> str:
+ if is_aws_cloud():
+ if sender_email_address is None:
+ raise ValueError(
+ "sender_email_address must be specified to run this test against AWS"
+ )
+ else:
+ # overwrite the given parameters with localstack specific ones
+ sender_email_address = f"sender-{short_uid()}@example.com"
+ ses_verify_identity(sender_email_address)
+
+ return sender_email_address
+
+ return inner
+
+
@pytest.fixture
def ec2_create_security_group(aws_client):
ec2_sgs = []
diff --git a/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py b/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py
index 0fdcfbebdfad7..13a134d269e85 100644
--- a/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py
+++ b/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py
@@ -1,5 +1,8 @@
import json
import logging
+import os
+import shutil
+import tempfile
from typing import Final
import pytest
@@ -144,6 +147,34 @@ def aws_client_no_sync_prefix(aws_client_factory):
return aws_client_factory(config=Config(inject_host_prefix=is_aws_cloud()))
+@pytest.fixture
+def mock_config_file():
+ tmp_dir = tempfile.mkdtemp()
+ file_path = os.path.join(tmp_dir, "MockConfigFile.json")
+
+ def write_json_to_mock_file(mock_config):
+ with open(file_path, "w") as df:
+ json.dump(mock_config, df) # noqa
+ df.flush()
+ return file_path
+
+ try:
+ yield write_json_to_mock_file
+ finally:
+ try:
+ os.remove(file_path)
+ except Exception as ex:
+ LOG.error("Error removing temporary MockConfigFile.json: %s", ex)
+ finally:
+ shutil.rmtree(
+ tmp_dir,
+ # note: ignore_errors=True would suppress the onerror callback below
+ onerror=lambda _, path, exc_info: LOG.error(
+ "Error removing temporary mock configuration directory '%s': %s", path, exc_info
+ ),
+ )
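+
+
+# Usage sketch (hypothetical): write a mock configuration and point the
+# SFN_MOCK_CONFIG option at it:
+#   path = mock_config_file({"StateMachines": {}, "MockedResponses": {}})
+#   monkeypatch.setattr(config, "SFN_MOCK_CONFIG", path)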
+
+
@pytest.fixture
def create_state_machine_iam_role(cleanups, create_state_machine):
def _create(target_aws_client):
diff --git a/localstack-core/localstack/testing/pytest/stepfunctions/utils.py b/localstack-core/localstack/testing/pytest/stepfunctions/utils.py
index ddc0d6f6e41fe..3b2925e5a9353 100644
--- a/localstack-core/localstack/testing/pytest/stepfunctions/utils.py
+++ b/localstack-core/localstack/testing/pytest/stepfunctions/utils.py
@@ -10,6 +10,7 @@
TransformContext,
)
+from localstack import config
from localstack.aws.api.stepfunctions import (
Arn,
CloudWatchLogsLogGroup,
@@ -27,6 +28,7 @@
from localstack.services.stepfunctions.asl.eval.event.logging import is_logging_enabled_for
from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError, extract_json
+from localstack.testing.aws.util import is_aws_cloud
from localstack.utils.strings import short_uid
from localstack.utils.sync import poll_condition
@@ -36,6 +38,16 @@
# For EXPRESS state machines, the deletion will happen eventually (usually less than a minute).
# Running executions may emit logs after DeleteStateMachine API is called.
_DELETION_TIMEOUT_SECS: Final[int] = 120
+_SAMPLING_INTERVAL_SECONDS_AWS_CLOUD: Final[int] = 1
+_SAMPLING_INTERVAL_SECONDS_LOCALSTACK: Final[float] = 0.2
+
+
+def _get_sampling_interval_seconds() -> int | float:
+ return (
+ _SAMPLING_INTERVAL_SECONDS_AWS_CLOUD
+ if is_aws_cloud()
+ else _SAMPLING_INTERVAL_SECONDS_LOCALSTACK
+ )
def await_no_state_machines_listed(stepfunctions_client):
@@ -47,7 +59,7 @@ def _is_empty_state_machine_list():
success = poll_condition(
condition=_is_empty_state_machine_list,
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning("Timed out whilst awaiting for listing to be empty.")
@@ -76,7 +88,7 @@ def await_state_machine_alias_is_created(
state_machine_alias_arn=state_machine_alias_arn,
),
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning("Timed out whilst awaiting for listing to be empty.")
@@ -92,7 +104,7 @@ def await_state_machine_alias_is_deleted(
state_machine_alias_arn=state_machine_alias_arn,
),
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning("Timed out whilst awaiting for listing to be empty.")
@@ -122,7 +134,7 @@ def await_state_machine_not_listed(stepfunctions_client, state_machine_arn: str)
success = poll_condition(
condition=lambda: not _is_state_machine_listed(stepfunctions_client, state_machine_arn),
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning("Timed out whilst awaiting for listing to exclude '%s'.", state_machine_arn)
@@ -132,7 +144,7 @@ def await_state_machine_listed(stepfunctions_client, state_machine_arn: str):
success = poll_condition(
condition=lambda: _is_state_machine_listed(stepfunctions_client, state_machine_arn),
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning("Timed out whilst awaiting for listing to include '%s'.", state_machine_arn)
@@ -146,7 +158,7 @@ def await_state_machine_version_not_listed(
stepfunctions_client, state_machine_arn, state_machine_version_arn
),
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning(
@@ -164,7 +176,7 @@ def await_state_machine_version_listed(
stepfunctions_client, state_machine_arn, state_machine_version_arn
),
timeout=_DELETION_TIMEOUT_SECS,
- interval=1,
+ interval=_get_sampling_interval_seconds(),
)
if not success:
LOG.warning(
@@ -190,7 +202,9 @@ def _run_check():
res: bool = check_func(events)
return res
- assert poll_condition(condition=_run_check, timeout=120, interval=1)
+ assert poll_condition(
+ condition=_run_check, timeout=120, interval=_get_sampling_interval_seconds()
+ )
return events
@@ -223,7 +237,9 @@ def _run_check():
return True
return False
- success = poll_condition(condition=_run_check, timeout=120, interval=1)
+ success = poll_condition(
+ condition=_run_check, timeout=120, interval=_get_sampling_interval_seconds()
+ )
if not success:
LOG.warning(
"Timed out whilst awaiting for execution status %s to satisfy condition for execution '%s'.",
@@ -264,7 +280,9 @@ def _check_last_is_terminal() -> bool:
return execution["status"] != ExecutionStatus.RUNNING
return False
- success = poll_condition(condition=_check_last_is_terminal, timeout=120, interval=1)
+ success = poll_condition(
+ condition=_check_last_is_terminal, timeout=120, interval=_get_sampling_interval_seconds()
+ )
if not success:
LOG.warning(
"Timed out whilst awaiting for execution events to satisfy condition for execution '%s'.",
@@ -291,7 +309,9 @@ def _run_check():
status: ExecutionStatus = desc_res["status"]
return status == ExecutionStatus.ABORTED
- success = poll_condition(condition=_run_check, timeout=120, interval=1)
+ success = poll_condition(
+ condition=_run_check, timeout=120, interval=_get_sampling_interval_seconds()
+ )
if not success:
LOG.warning("Timed out whilst awaiting for execution '%s' to abort.", execution_arn)
@@ -383,6 +403,7 @@ def create_state_machine_with_iam_role(
snapshot,
definition: Definition,
logging_configuration: Optional[LoggingConfiguration] = None,
+ state_machine_name: Optional[str] = None,
):
snf_role_arn = create_state_machine_iam_role(target_aws_client=target_aws_client)
snapshot.add_transformer(RegexTransformer(snf_role_arn, "snf_role_arn"))
@@ -396,7 +417,7 @@ def create_state_machine_with_iam_role(
RegexTransformer("Request ID: [a-zA-Z0-9-]+", "Request ID: ")
)
- sm_name: str = f"statemachine_create_and_record_execution_{short_uid()}"
+ sm_name: str = state_machine_name or f"statemachine_create_and_record_execution_{short_uid()}"
create_arguments = {
"name": sm_name,
"definition": definition,
@@ -450,6 +471,42 @@ def launch_and_record_execution(
return execution_arn
+def launch_and_record_mocked_execution(
+ target_aws_client,
+ sfn_snapshot,
+ state_machine_arn,
+ execution_input,
+ test_name,
+) -> LongArn:
+ stepfunctions_client = target_aws_client.stepfunctions
+ exec_resp = stepfunctions_client.start_execution(
+ stateMachineArn=f"{state_machine_arn}#{test_name}", input=execution_input
+ )
+ sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_exec_arn(exec_resp, 0))
+ execution_arn = exec_resp["executionArn"]
+
+ await_execution_terminated(
+ stepfunctions_client=stepfunctions_client, execution_arn=execution_arn
+ )
+
+ get_execution_history = stepfunctions_client.get_execution_history(executionArn=execution_arn)
+
+ # Transform all map runs if any.
+ try:
+ map_run_arns = extract_json("$..mapRunArn", get_execution_history)
+ if isinstance(map_run_arns, str):
+ map_run_arns = [map_run_arns]
+ for i, map_run_arn in enumerate(list(set(map_run_arns))):
+ sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_map_run_arn(map_run_arn, i))
+ except NoSuchJsonPathError:
+ # No mapRunArns
+ pass
+
+ sfn_snapshot.match("get_execution_history", get_execution_history)
+
+ return execution_arn
+
+
def launch_and_record_logs(
target_aws_client,
state_machine_arn,
@@ -487,7 +544,6 @@ def launch_and_record_logs(
sfn_snapshot.match("logged_execution_events", logged_execution_events)
-# TODO: make this return the execution ARN for manual assertions
def create_and_record_execution(
target_aws_client,
create_state_machine_iam_role,
@@ -496,7 +552,7 @@ def create_and_record_execution(
definition,
execution_input,
verify_execution_description=False,
-):
+) -> LongArn:
state_machine_arn = create_state_machine_with_iam_role(
target_aws_client,
create_state_machine_iam_role,
@@ -504,13 +560,72 @@ def create_and_record_execution(
sfn_snapshot,
definition,
)
- launch_and_record_execution(
+ execution_arn = launch_and_record_execution(
target_aws_client,
sfn_snapshot,
state_machine_arn,
execution_input,
verify_execution_description,
)
+ return execution_arn
+
+
+def create_and_record_mocked_execution(
+ target_aws_client,
+ create_state_machine_iam_role,
+ create_state_machine,
+ sfn_snapshot,
+ definition,
+ execution_input,
+ state_machine_name,
+ test_name,
+) -> LongArn:
+ state_machine_arn = create_state_machine_with_iam_role(
+ target_aws_client,
+ create_state_machine_iam_role,
+ create_state_machine,
+ sfn_snapshot,
+ definition,
+ state_machine_name=state_machine_name,
+ )
+ execution_arn = launch_and_record_mocked_execution(
+ target_aws_client, sfn_snapshot, state_machine_arn, execution_input, test_name
+ )
+ return execution_arn
+
+
+def create_and_run_mock(
+ target_aws_client,
+ monkeypatch,
+ mock_config_file,
+ mock_config: dict,
+ state_machine_name: str,
+ definition_template: dict,
+ execution_input: str,
+ test_name: str,
+):
+ mock_config_file_path = mock_config_file(mock_config)
+ monkeypatch.setattr(config, "SFN_MOCK_CONFIG", mock_config_file_path)
+
+ sfn_client = target_aws_client.stepfunctions
+
+ state_machine_name: str = state_machine_name or f"mocked_statemachine_{short_uid()}"
+ definition = json.dumps(definition_template)
+ creation_response = sfn_client.create_state_machine(
+ name=state_machine_name,
+ definition=definition,
+ roleArn="arn:aws:iam::111111111111:role/mock-role/mocked-run",
+ )
+ state_machine_arn = creation_response["stateMachineArn"]
+
+ test_case_arn = f"{state_machine_arn}#{test_name}"
+ execution = sfn_client.start_execution(stateMachineArn=test_case_arn, input=execution_input)
+ execution_arn = execution["executionArn"]
+
+ await_execution_terminated(stepfunctions_client=sfn_client, execution_arn=execution_arn)
+ sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
+
+ return execution_arn
def create_and_record_logs(
diff --git a/localstack-core/localstack/testing/snapshots/transformer_utility.py b/localstack-core/localstack/testing/snapshots/transformer_utility.py
index 77a9bdfc6e0b5..7d2d73c844dbb 100644
--- a/localstack-core/localstack/testing/snapshots/transformer_utility.py
+++ b/localstack-core/localstack/testing/snapshots/transformer_utility.py
@@ -648,6 +648,19 @@ def secretsmanager_api():
),
"version_uuid",
),
+ KeyValueBasedTransformer(
+ lambda k, v: (
+ v
+ if (
+ isinstance(k, str)
+ and k == "RotationLambdaARN"
+ and isinstance(v, str)
+ and re.match(PATTERN_ARN, v)
+ )
+ else None
+ ),
+ "lambda-arn",
+ ),
SortingTransformer("VersionStages"),
SortingTransformer("Versions", lambda e: e.get("CreatedDate")),
]
diff --git a/localstack-core/localstack/utils/analytics/metadata.py b/localstack-core/localstack/utils/analytics/metadata.py
index c0ef292d69121..da135c861a323 100644
--- a/localstack-core/localstack/utils/analytics/metadata.py
+++ b/localstack-core/localstack/utils/analytics/metadata.py
@@ -237,11 +237,11 @@ def prepare_host_machine_id():
@hooks.configure_localstack_container()
def _mount_machine_file(container: Container):
- from localstack.utils.container_utils.container_client import VolumeBind
+ from localstack.utils.container_utils.container_client import BindMount
# mount tha machine file from the host's CLI cache directory into the appropriate location in the
# container
machine_file = os.path.join(config.dirs.cache, "machine.json")
if os.path.isfile(machine_file):
target = os.path.join(config.dirs.for_container().cache, "machine.json")
- container.config.volumes.add(VolumeBind(machine_file, target, read_only=True))
+ container.config.volumes.add(BindMount(machine_file, target, read_only=True))
diff --git a/localstack-core/localstack/utils/analytics/usage.py b/localstack-core/localstack/utils/analytics/usage.py
deleted file mode 100644
index 3c7abd64c7024..0000000000000
--- a/localstack-core/localstack/utils/analytics/usage.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"""
-[DEPRECATED] This module is deprecated in favor of `localstack.utils.analytics.metrics`.
-"""
-
-import datetime
-import math
-from collections import defaultdict
-from itertools import count
-from typing import Any
-
-from localstack import config
-from localstack.runtime import hooks
-from localstack.utils.analytics import get_session_id
-from localstack.utils.analytics.events import Event, EventMetadata
-from localstack.utils.analytics.publisher import AnalyticsClientPublisher
-
-# Counters have to register with the registry
-collector_registry: dict[str, Any] = dict()
-
-
-# TODO: introduce some base abstraction for the counters after gather some initial experience working with it
-# we could probably do intermediate aggregations over time to avoid unbounded counters for very long LS sessions
-# for now, we can recommend to use config.DISABLE_EVENTS=1
-
-
-class UsageSetCounter:
- """
- [DEPRECATED] Use `localstack.utils.analytics.metrics.Counter` instead.
- Use this counter to count occurrences of unique values
-
- Example:
- my_feature_counter = UsageSetCounter("lambda:runtime")
- my_feature_counter.record("python3.7")
- my_feature_counter.record("nodejs16.x")
- my_feature_counter.record("nodejs16.x")
- my_feature_counter.aggregate() # returns {"python3.7": 1, "nodejs16.x": 2}
- """
-
- state: dict[str, int]
- _counter: dict[str, count]
- namespace: str
-
- def __init__(self, namespace: str):
- self.enabled = not config.DISABLE_EVENTS
- self.state = {}
- self._counter = defaultdict(lambda: count(1))
- self.namespace = namespace
- collector_registry[namespace] = self
-
- def record(self, value: str):
- if self.enabled:
- self.state[value] = next(self._counter[value])
-
- def aggregate(self) -> dict:
- return self.state
-
-
-class UsageCounter:
- """
- [DEPRECATED] Use `localstack.utils.analytics.metrics.Counter` instead.
- Use this counter to count numeric values
-
- Example:
- my__counter = UsageCounter("lambda:somefeature")
- my_counter.increment()
- my_counter.increment()
- my_counter.aggregate() # returns {"count": 2}
- """
-
- state: int
- namespace: str
-
- def __init__(self, namespace: str):
- self.enabled = not config.DISABLE_EVENTS
- self.state = 0
- self._counter = count(1)
- self.namespace = namespace
- collector_registry[namespace] = self
-
- def increment(self):
- # TODO: we should instead have different underlying datastructures to store the state, and have no-op operations
- # when config.DISABLE_EVENTS is set
- if self.enabled:
- self.state = next(self._counter)
-
- def aggregate(self) -> dict:
- # TODO: should we just keep `count`? "sum" might need to be kept for historical data?
- return {"count": self.state, "sum": self.state}
-
-
-class TimingStats:
- """
- Use this counter to measure numeric values and perform aggregations
-
- Available aggregations: min, max, sum, mean, median, count
-
- Example:
- my_feature_counter = TimingStats("lambda:somefeature", aggregations=["min", "max", "sum", "count"])
- my_feature_counter.record_value(512)
- my_feature_counter.record_value(256)
- my_feature_counter.aggregate() # returns {"min": 256, "max": 512, "sum": 768, "count": 2}
- """
-
- state: list[int | float]
- namespace: str
- aggregations: list[str]
-
- def __init__(self, namespace: str, aggregations: list[str]):
- self.enabled = not config.DISABLE_EVENTS
- self.state = []
- self.namespace = namespace
- self.aggregations = aggregations
- collector_registry[namespace] = self
-
- def record_value(self, value: int | float):
- if self.enabled:
- self.state.append(value)
-
- def aggregate(self) -> dict:
- result = {}
- if self.state:
- for aggregation in self.aggregations:
- if aggregation == "sum":
- result[aggregation] = sum(self.state)
- elif aggregation == "min":
- result[aggregation] = min(self.state)
- elif aggregation == "max":
- result[aggregation] = max(self.state)
- elif aggregation == "mean":
- result[aggregation] = sum(self.state) / len(self.state)
- elif aggregation == "median":
- median_index = math.floor(len(self.state) / 2)
- result[aggregation] = sorted(self.state)[median_index]
- elif aggregation == "count":
- result[aggregation] = len(self.state)
- else:
- raise Exception(f"Unsupported aggregation: {aggregation}")
- return result
-
-
-def aggregate() -> dict:
- aggregated_payload = {}
- for ns, collector in collector_registry.items():
- agg = collector.aggregate()
- if agg:
- aggregated_payload[ns] = agg
- return aggregated_payload
-
-
-@hooks.on_infra_shutdown()
-def aggregate_and_send():
- """
- Aggregates data from all registered usage trackers and immediately sends the aggregated result to the analytics service.
- """
- if config.DISABLE_EVENTS:
- return
-
- metadata = EventMetadata(
- session_id=get_session_id(),
- client_time=str(datetime.datetime.now()),
- )
-
- aggregated_payload = aggregate()
-
- if aggregated_payload:
- publisher = AnalyticsClientPublisher()
- publisher.publish(
- [Event(name="ls:usage_analytics", metadata=metadata, payload=aggregated_payload)]
- )
diff --git a/localstack-core/localstack/utils/archives.py b/localstack-core/localstack/utils/archives.py
index dfba8d3c9aafc..97477f6d86c74 100644
--- a/localstack-core/localstack/utils/archives.py
+++ b/localstack-core/localstack/utils/archives.py
@@ -1,21 +1,14 @@
-import io
-import tarfile
-import zipfile
-from subprocess import Popen
-from typing import IO, Optional
-
-try:
- from typing import Literal
-except ImportError:
- from typing_extensions import Literal
-
import glob
+import io
import logging
import os
import re
+import tarfile
import tempfile
import time
-from typing import Union
+import zipfile
+from subprocess import Popen
+from typing import IO, Literal, Optional, Union
from localstack.constants import MAVEN_REPO_URL
from localstack.utils.files import load_file, mkdir, new_tmp_file, rm_rf, save_file
@@ -177,7 +170,13 @@ def upgrade_jar_file(base_dir: str, file_glob: str, maven_asset: str):
download(maven_asset_url, target_file)
-def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
+def download_and_extract(
+ archive_url: str,
+ target_dir: str,
+ retries: Optional[int] = 0,
+ sleep: Optional[int] = 3,
+ tmp_archive: Optional[str] = None,
+) -> None:
mkdir(target_dir)
_, ext = os.path.splitext(tmp_archive or archive_url)
diff --git a/localstack-core/localstack/utils/aws/arns.py b/localstack-core/localstack/utils/aws/arns.py
index 6caf2d10a6c5e..5b6f139473bac 100644
--- a/localstack-core/localstack/utils/aws/arns.py
+++ b/localstack-core/localstack/utils/aws/arns.py
@@ -524,6 +524,16 @@ def route53_resolver_query_log_config_arn(id: str, account_id: str, region_name:
return _resource_arn(id, pattern, account_id=account_id, region_name=region_name)
+#
+# SES
+#
+
+
+def ses_identity_arn(email: str, account_id: str, region_name: str) -> str:
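+ # e.g. (hypothetical) ses_identity_arn("user@example.com", "000000000000", "us-east-1")
+ # -> "arn:aws:ses:us-east-1:000000000000:identity/user@example.com"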
+ pattern = "arn:%s:ses:%s:%s:identity/%s"
+ return _resource_arn(email, pattern, account_id=account_id, region_name=region_name)
+
+
#
# Other ARN related helpers
#
diff --git a/localstack-core/localstack/utils/aws/client_types.py b/localstack-core/localstack/utils/aws/client_types.py
index c0d48a92555a9..1fd9f3a84df5e 100644
--- a/localstack-core/localstack/utils/aws/client_types.py
+++ b/localstack-core/localstack/utils/aws/client_types.py
@@ -31,6 +31,7 @@
from mypy_boto3_cloudwatch import CloudWatchClient
from mypy_boto3_codebuild import CodeBuildClient
from mypy_boto3_codecommit import CodeCommitClient
+ from mypy_boto3_codeconnections import CodeConnectionsClient
from mypy_boto3_codedeploy import CodeDeployClient
from mypy_boto3_codepipeline import CodePipelineClient
from mypy_boto3_codestar_connections import CodeStarconnectionsClient
@@ -109,6 +110,7 @@
from mypy_boto3_timestream_query import TimestreamQueryClient
from mypy_boto3_timestream_write import TimestreamWriteClient
from mypy_boto3_transcribe import TranscribeServiceClient
+ from mypy_boto3_verifiedpermissions import VerifiedPermissionsClient
from mypy_boto3_wafv2 import WAFV2Client
from mypy_boto3_xray import XRayClient
@@ -139,6 +141,9 @@ class TypedServiceClientFactory(abc.ABC):
cloudwatch: Union["CloudWatchClient", "MetadataRequestInjector[CloudWatchClient]"]
codebuild: Union["CodeBuildClient", "MetadataRequestInjector[CodeBuildClient]"]
codecommit: Union["CodeCommitClient", "MetadataRequestInjector[CodeCommitClient]"]
+ codeconnections: Union[
+ "CodeConnectionsClient", "MetadataRequestInjector[CodeConnectionsClient]"
+ ]
codedeploy: Union["CodeDeployClient", "MetadataRequestInjector[CodeDeployClient]"]
codepipeline: Union["CodePipelineClient", "MetadataRequestInjector[CodePipelineClient]"]
codestar_connections: Union[
@@ -255,6 +260,9 @@ class TypedServiceClientFactory(abc.ABC):
"TimestreamWriteClient", "MetadataRequestInjector[TimestreamWriteClient]"
]
transcribe: Union["TranscribeServiceClient", "MetadataRequestInjector[TranscribeServiceClient]"]
+ verifiedpermissions: Union[
+ "VerifiedPermissionsClient", "MetadataRequestInjector[VerifiedPermissionsClient]"
+ ]
wafv2: Union["WAFV2Client", "MetadataRequestInjector[WAFV2Client]"]
xray: Union["XRayClient", "MetadataRequestInjector[XRayClient]"]
diff --git a/localstack-core/localstack/utils/bootstrap.py b/localstack-core/localstack/utils/bootstrap.py
index ddca686698185..e767c22f90b30 100644
--- a/localstack-core/localstack/utils/bootstrap.py
+++ b/localstack-core/localstack/utils/bootstrap.py
@@ -24,6 +24,7 @@
from localstack.runtime import hooks
from localstack.utils.container_networking import get_main_container_name
from localstack.utils.container_utils.container_client import (
+ BindMount,
CancellableStream,
ContainerClient,
ContainerConfiguration,
@@ -33,7 +34,7 @@
NoSuchImage,
NoSuchNetwork,
PortMappings,
- VolumeBind,
+ VolumeDirMount,
VolumeMappings,
)
from localstack.utils.container_utils.docker_cmd_client import CmdDockerClient
@@ -491,7 +492,7 @@ def mount_docker_socket(cfg: ContainerConfiguration):
target = "/var/run/docker.sock"
if cfg.volumes.find_target_mapping(target):
return
- cfg.volumes.add(VolumeBind(source, target))
+ cfg.volumes.add(BindMount(source, target))
cfg.env_vars["DOCKER_HOST"] = f"unix://{target}"
@staticmethod
@@ -501,7 +502,7 @@ def mount_localstack_volume(host_path: str | os.PathLike = None):
def _cfg(cfg: ContainerConfiguration):
if cfg.volumes.find_target_mapping(constants.DEFAULT_VOLUME_DIR):
return
- cfg.volumes.add(VolumeBind(str(host_path), constants.DEFAULT_VOLUME_DIR))
+ cfg.volumes.add(BindMount(str(host_path), constants.DEFAULT_VOLUME_DIR))
return _cfg
@@ -679,7 +680,7 @@ def _cfg(cfg: ContainerConfiguration):
return _cfg
@staticmethod
- def volume(volume: VolumeBind):
+ def volume(volume: BindMount | VolumeDirMount):
def _cfg(cfg: ContainerConfiguration):
cfg.volumes.add(volume)
@@ -807,7 +808,7 @@ def volume_cli_params(params: Iterable[str] = None):
def _cfg(cfg: ContainerConfiguration):
for param in params:
- cfg.volumes.append(VolumeBind.parse(param))
+ cfg.volumes.append(BindMount.parse(param))
return _cfg
diff --git a/localstack-core/localstack/utils/container_utils/container_client.py b/localstack-core/localstack/utils/container_utils/container_client.py
index 945832829203e..e05fdd6da5a55 100644
--- a/localstack-core/localstack/utils/container_utils/container_client.py
+++ b/localstack-core/localstack/utils/container_utils/container_client.py
@@ -27,6 +27,7 @@
import dotenv
from localstack import config
+from localstack.constants import DEFAULT_VOLUME_DIR
from localstack.utils.collections import HashableList, ensure_list
from localstack.utils.files import TMP_FILES, chmod_r, rm_rf, save_file
from localstack.utils.no_exit_argument_parser import NoExitArgumentParser
@@ -370,7 +371,7 @@ def __repr__(self):
@dataclasses.dataclass
-class VolumeBind:
+class BindMount:
"""Represents a --volume argument run/create command. When using VolumeBind to bind-mount a file or directory
that does not yet exist on the Docker host, -v creates the endpoint for you. It is always created as a directory.
"""
@@ -395,8 +396,14 @@ def to_str(self) -> str:
return ":".join(args)
+ def to_docker_sdk_parameters(self) -> tuple[str, dict[str, str]]:
+ return str(self.host_dir), {
+ "bind": self.container_dir,
+ "mode": "ro" if self.read_only else "rw",
+ }
+
@classmethod
- def parse(cls, param: str) -> "VolumeBind":
+ def parse(cls, param: str) -> "BindMount":
parts = param.split(":")
- if 1 > len(parts) > 3:
+ if not 1 <= len(parts) <= 3:
raise ValueError(f"Cannot parse volume bind {param}")
@@ -408,27 +415,66 @@ def parse(cls, param: str) -> "VolumeBind":
return volume
+@dataclasses.dataclass
+class VolumeDirMount:
+ volume_path: str
+ """
+ Absolute path inside /var/lib/localstack to mount into the container
+ """
+ container_path: str
+ """
+ Target path inside the started container
+ """
+ read_only: bool = False
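+ # e.g. (hypothetical) VolumeDirMount("/var/lib/localstack/cache", "/cache")
+ # renders "<host path of the volume dir>:/cache" for the docker -v option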
+
+ def to_str(self) -> str:
+ self._validate()
+ from localstack.utils.docker_utils import get_host_path_for_path_in_docker
+
+ host_dir = get_host_path_for_path_in_docker(self.volume_path)
+ return f"{host_dir}:{self.container_path}{':ro' if self.read_only else ''}"
+
+ def _validate(self):
+ if not self.volume_path:
+ raise ValueError("no volume dir specified")
+ if config.is_in_docker and not self.volume_path.startswith(DEFAULT_VOLUME_DIR):
+ raise ValueError(f"volume dir not starting with {DEFAULT_VOLUME_DIR}")
+ if not self.container_path:
+ raise ValueError("no container dir specified")
+
+ def to_docker_sdk_parameters(self) -> tuple[str, dict[str, str]]:
+ self._validate()
+ from localstack.utils.docker_utils import get_host_path_for_path_in_docker
+
+ host_dir = get_host_path_for_path_in_docker(self.volume_path)
+ return host_dir, {
+ "bind": self.container_path,
+ "mode": "ro" if self.read_only else "rw",
+ }
+
+
class VolumeMappings:
- mappings: List[Union[SimpleVolumeBind, VolumeBind]]
+ mappings: List[Union[SimpleVolumeBind, BindMount]]
- def __init__(self, mappings: List[Union[SimpleVolumeBind, VolumeBind]] = None):
+ def __init__(self, mappings: List[Union[SimpleVolumeBind, BindMount, VolumeDirMount]] = None):
self.mappings = mappings if mappings is not None else []
- def add(self, mapping: Union[SimpleVolumeBind, VolumeBind]):
+ def add(self, mapping: Union[SimpleVolumeBind, BindMount, VolumeDirMount]):
self.append(mapping)
def append(
self,
mapping: Union[
SimpleVolumeBind,
- VolumeBind,
+ BindMount,
+ VolumeDirMount,
],
):
self.mappings.append(mapping)
def find_target_mapping(
self, container_dir: str
- ) -> Optional[Union[SimpleVolumeBind, VolumeBind]]:
+ ) -> Optional[Union[SimpleVolumeBind, BindMount, VolumeDirMount]]:
"""
Looks through the volumes and returns the one where the container dir matches ``container_dir``.
Returns None if there is no volume mapping to the given container directory.
@@ -448,6 +494,12 @@ def __iter__(self):
def __repr__(self):
return self.mappings.__repr__()
+ def __len__(self):
+ return len(self.mappings)
+
+ def __getitem__(self, item: int):
+ return self.mappings[item]
+
VolumeType = Literal["bind", "volume"]
@@ -1441,12 +1493,9 @@ def convert_mount_list_to_dict(
) -> Dict[str, Dict[str, str]]:
"""Converts a List of (host_path, container_path) tuples to a Dict suitable as volume argument for docker sdk"""
- def _map_to_dict(paths: SimpleVolumeBind | VolumeBind):
- if isinstance(paths, VolumeBind):
- return str(paths.host_dir), {
- "bind": paths.container_dir,
- "mode": "ro" if paths.read_only else "rw",
- }
+ def _map_to_dict(paths: SimpleVolumeBind | BindMount | VolumeDirMount):
+ if isinstance(paths, (BindMount, VolumeDirMount)):
+ return paths.to_docker_sdk_parameters()
else:
return str(paths[0]), {"bind": paths[1], "mode": "rw"}
diff --git a/localstack-core/localstack/utils/container_utils/docker_cmd_client.py b/localstack-core/localstack/utils/container_utils/docker_cmd_client.py
index b65ddb2e8b018..7cdd7b59f8092 100644
--- a/localstack-core/localstack/utils/container_utils/docker_cmd_client.py
+++ b/localstack-core/localstack/utils/container_utils/docker_cmd_client.py
@@ -12,6 +12,7 @@
from localstack.utils.collections import ensure_list
from localstack.utils.container_utils.container_client import (
AccessDenied,
+ BindMount,
CancellableStream,
ContainerClient,
ContainerException,
@@ -29,7 +30,7 @@
SimpleVolumeBind,
Ulimit,
Util,
- VolumeBind,
+ VolumeDirMount,
)
from localstack.utils.run import run
from localstack.utils.strings import first_char_to_upper, to_str
@@ -878,7 +879,7 @@ def _build_run_create_cmd(
return cmd, env_file
@staticmethod
- def _map_to_volume_param(volume: Union[SimpleVolumeBind, VolumeBind]) -> str:
+ def _map_to_volume_param(volume: Union[SimpleVolumeBind, BindMount, VolumeDirMount]) -> str:
"""
Maps the mount volume, to a parameter for the -v docker cli argument.
@@ -889,7 +890,7 @@ def _map_to_volume_param(volume: Union[SimpleVolumeBind, VolumeBind]) -> str:
:param volume: Either a SimpleVolumeBind, in essence a tuple (host_dir, container_dir), or a VolumeBind object
:return: String which is passable as parameter to the docker cli -v option
"""
- if isinstance(volume, VolumeBind):
+ if isinstance(volume, (BindMount, VolumeDirMount)):
return volume.to_str()
else:
return f"{volume[0]}:{volume[1]}"
@@ -908,12 +909,15 @@ def _check_and_raise_no_such_container_error(
if any(msg.lower() in process_stdout_lower for msg in error_messages):
raise NoSuchContainer(container_name_or_id, stdout=error.stdout, stderr=error.stderr)
- def _transform_container_labels(self, labels: str) -> Dict[str, str]:
+ def _transform_container_labels(self, labels: Union[str, Dict[str, str]]) -> Dict[str, str]:
"""
Transforms the container labels returned by the docker command from the key-value pair format to a dict
- :param labels: Input string, comma separated key value pairs. Example: key1=value1,key2=value2
+ :param labels: Input string of comma-separated key-value pairs (example: key1=value1,key2=value2), or an already-parsed dict, which is returned unchanged
:return: Dict representation of the passed values, example: {"key1": "value1", "key2": "value2"}
"""
+ if isinstance(labels, dict):
+ return labels
+
labels = labels.split(",")
labels = [label.partition("=") for label in labels]
return {label[0]: label[2] for label in labels}
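+
+ # Example: "k1=v1,k2=v2" -> {"k1": "v1", "k2": "v2"}; an already-parsed dict is
+ # returned unchanged by the isinstance guard above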
diff --git a/localstack-core/localstack/utils/coverage_docs.py b/localstack-core/localstack/utils/coverage_docs.py
index 43649df5fd102..fde4628a32f67 100644
--- a/localstack-core/localstack/utils/coverage_docs.py
+++ b/localstack-core/localstack/utils/coverage_docs.py
@@ -1,8 +1,4 @@
-COVERAGE_LINK_BASE = "https://docs.localstack.cloud/references/coverage/"
-MESSAGE_TEMPLATE = (
- f"API %sfor service '%s' not yet implemented or pro feature"
- f" - please check {COVERAGE_LINK_BASE}%s for further information"
-)
+_COVERAGE_LINK_BASE = "https://docs.localstack.cloud/references/coverage"
def get_coverage_link_for_service(service_name: str, action_name: str) -> str:
@@ -11,11 +7,14 @@ def get_coverage_link_for_service(service_name: str, action_name: str) -> str:
available_services = SERVICE_PLUGINS.list_available()
if service_name not in available_services:
- return MESSAGE_TEMPLATE % ("", service_name, "")
-
+ return (
+ f"The API for service '{service_name}' is either not included in your current license plan "
+ "or has not yet been emulated by LocalStack. "
+ f"Please refer to {_COVERAGE_LINK_BASE} for more details."
+ )
else:
- return MESSAGE_TEMPLATE % (
- f"action '{action_name}' ",
- service_name,
- f"coverage_{service_name}/",
+ return (
+ f"The API action '{action_name}' for service '{service_name}' is either not available in "
+ "your current license plan or has not yet been emulated by LocalStack. "
+ f"Please refer to {_COVERAGE_LINK_BASE}/coverage_{service_name} for more information."
)
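+
+
+# Example (illustrative): for an available service such as "qldb",
+#   get_coverage_link_for_service("qldb", "SendCommand")
+# returns a message pointing to
+#   https://docs.localstack.cloud/references/coverage/coverage_qldb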
diff --git a/localstack-core/localstack/utils/diagnose.py b/localstack-core/localstack/utils/diagnose.py
index 0de08f10d5ca0..36b0b079631f9 100644
--- a/localstack-core/localstack/utils/diagnose.py
+++ b/localstack-core/localstack/utils/diagnose.py
@@ -10,7 +10,7 @@
from localstack.services.lambda_.invocation.docker_runtime_executor import IMAGE_PREFIX
from localstack.services.lambda_.runtimes import IMAGE_MAPPING
from localstack.utils import bootstrap
-from localstack.utils.analytics import usage
+from localstack.utils.analytics.metrics import MetricRegistry
from localstack.utils.container_networking import get_main_container_name
from localstack.utils.container_utils.container_client import ContainerException, NoSuchImage
from localstack.utils.docker_utils import DOCKER_CLIENT
@@ -153,4 +153,4 @@ def get_host_kernel_version() -> str:
def get_usage():
- return usage.aggregate()
+ return MetricRegistry().collect()
diff --git a/localstack-core/localstack/utils/docker_utils.py b/localstack-core/localstack/utils/docker_utils.py
index bab738135f053..9ff5f57134ca6 100644
--- a/localstack-core/localstack/utils/docker_utils.py
+++ b/localstack-core/localstack/utils/docker_utils.py
@@ -156,14 +156,14 @@ def container_ports_can_be_bound(
except Exception as e:
if "port is already allocated" not in str(e) and "address already in use" not in str(e):
LOG.warning(
- "Unexpected error when attempting to determine container port status: %s", e
+ "Unexpected error when attempting to determine container port status", exc_info=e
)
return False
# TODO(srw): sometimes the command output from the docker container is "None", particularly when this function is
# invoked multiple times consecutively. Work out why.
if to_str(result[0] or "").strip() != "test123":
LOG.warning(
- "Unexpected output when attempting to determine container port status: %s", result[0]
+ "Unexpected output when attempting to determine container port status: %s", result
)
return True
diff --git a/localstack-core/localstack/utils/strings.py b/localstack-core/localstack/utils/strings.py
index 33f5f203a3d66..aead8aaade907 100644
--- a/localstack-core/localstack/utils/strings.py
+++ b/localstack-core/localstack/utils/strings.py
@@ -78,6 +78,10 @@ def snake_to_camel_case(string: str, capitalize_first: bool = True) -> str:
return "".join(components)
+def hyphen_to_snake_case(string: str) -> str:
+ """Convert a hyphenated string to snake case, e.g. "foo-bar" -> "foo_bar"."""
+ return string.replace("-", "_")
+
+
def canonicalize_bool_to_str(val: bool) -> str:
return "true" if str(val).lower() == "true" else "false"
@@ -234,3 +238,9 @@ def key_value_pairs_to_dict(pairs: str, delimiter: str = ",", separator: str = "
"""
splits = [split_pair.partition(separator) for split_pair in pairs.split(delimiter)]
return {key.strip(): value.strip() for key, _, value in splits}
+
+
+def token_generator(item: str) -> str:
+ """Generate a deterministic token by base64-encoding the given string."""
+ base64_bytes = base64.b64encode(item.encode("utf-8"))
+ token = base64_bytes.decode("utf-8")
+ return token
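+
+
+# Example: token_generator("sample") -> "c2FtcGxl"; the value can be recovered with
+# base64.b64decode, since this is plain encoding rather than hashing.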
diff --git a/localstack-core/localstack/utils/venv.py b/localstack-core/localstack/utils/venv.py
index 21d5bf4fa3ece..7911110ce54f6 100644
--- a/localstack-core/localstack/utils/venv.py
+++ b/localstack-core/localstack/utils/venv.py
@@ -14,7 +14,7 @@ class VirtualEnvironment:
def __init__(self, venv_dir: Union[str, os.PathLike]):
self._venv_dir = venv_dir
- def create(self):
+ def create(self) -> None:
"""
Uses the virtualenv cli to create the virtual environment.
:return:
@@ -73,7 +73,7 @@ def site_dir(self) -> Path:
return matches[0]
- def inject_to_sys_path(self):
+ def inject_to_sys_path(self) -> None:
path = str(self.site_dir)
if path and path not in sys.path:
sys.path.append(path)
diff --git a/localstack-core/mypy.ini b/localstack-core/mypy.ini
new file mode 100644
index 0000000000000..d5ec889accc0b
--- /dev/null
+++ b/localstack-core/mypy.ini
@@ -0,0 +1,19 @@
+[mypy]
+explicit_package_bases = true
+mypy_path = localstack-core
+files = localstack/packages,localstack/services/kinesis/packages.py
+ignore_missing_imports = False
+follow_imports = silent
+ignore_errors = False
+disallow_untyped_defs = True
+disallow_untyped_calls = True
+disallow_any_generics = True
+disallow_subclassing_any = True
+warn_unused_ignores = True
+
+[mypy-localstack.services.lambda_.invocation.*,localstack.services.lambda_.provider]
+ignore_errors = False
+disallow_untyped_defs = True
+disallow_untyped_calls = True
+disallow_any_generics = True
+allow_untyped_globals = False
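+
+# Usage sketch (assumes mypy is installed, e.g. from the new "dev" extra):
+#   python -m mypy --config-file localstack-core/mypy.ini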
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index f53f61c32fa73..0000000000000
--- a/mypy.ini
+++ /dev/null
@@ -1,10 +0,0 @@
-[mypy]
-ignore_missing_imports = True
-ignore_errors = True
-
-[mypy-localstack.services.lambda_.invocation.*,localstack.services.lambda_.provider]
-ignore_errors = False
-disallow_untyped_defs = True
-disallow_untyped_calls = True
-disallow_any_generics = True
-allow_untyped_globals = False
diff --git a/pyproject.toml b/pyproject.toml
index a992abc6ef877..fb920801351d3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,6 +9,7 @@ authors = [
{ name = "LocalStack Contributors", email = "info@localstack.cloud" }
]
description = "The core library and runtime of LocalStack"
+license = "Apache-2.0"
requires-python = ">=3.9"
dependencies = [
"build",
@@ -31,7 +32,6 @@ dynamic = ["version"]
classifiers = [
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
- "License :: OSI Approved :: Apache Software License",
"Topic :: Internet",
"Topic :: Software Development :: Testing",
"Topic :: System :: Emulators",
@@ -53,9 +53,9 @@ Issues = "https://github.com/localstack/localstack/issues"
# minimal required to actually run localstack on the host for services natively implemented in python
base-runtime = [
# pinned / updated by ASF update action
- "boto3==1.37.18",
+ "boto3==1.38.8",
# pinned / updated by ASF update action
- "botocore==1.37.18",
+ "botocore==1.38.8",
"awscrt>=0.13.14",
"cbor2>=5.5.0",
"dnspython>=1.16.0",
@@ -78,9 +78,10 @@ base-runtime = [
runtime = [
"localstack-core[base-runtime]",
# pinned / updated by ASF update action
- "awscli>=1.32.117",
+ "awscli>=1.37.0",
"airspeed-ext>=0.6.3",
- "amazon_kclpy>=3.0.0",
+ # fork of amazon-kclpy that is published with a prebuilt wheel
+ "kclpy-ext>=3.0.0",
# antlr4-python3-runtime: exact pin because antlr4 runtime is tightly coupled to the generated parser code
"antlr4-python3-runtime==4.13.2",
"apispec>=5.1.1",
@@ -92,7 +93,7 @@ runtime = [
"json5>=0.9.11",
"jsonpath-ng>=1.6.1",
"jsonpath-rw>=1.4.0",
- "moto-ext[all]==5.1.1.post1",
+ "moto-ext[all]==5.1.4.post1",
"opensearch-py>=2.4.1",
"pymongo>=4.2.0",
"pyopenssl>=23.0.0",
@@ -108,8 +109,7 @@ test = [
"pluggy>=1.3.0",
"pytest>=7.4.2",
"pytest-split>=0.8.0",
- # TODO fix issues with pytest-httpserver==1.1.2, remove upper boundary
- "pytest-httpserver>=1.0.1,<1.1.2",
+ "pytest-httpserver>=1.1.2",
"pytest-rerunfailures>=12.0",
"pytest-tinybird>=0.2.0",
"aws-cdk-lib>=2.88.0",
@@ -130,6 +130,7 @@ dev = [
"pypandoc",
"ruff>=0.3.3",
"rstr>=3.2.0",
+ "mypy",
]
# not strictly necessary for development, but provides type hint support for a better developer experience
@@ -137,7 +138,7 @@ typehint = [
# typehint is an optional extension of the dev dependencies
"localstack-core[dev]",
# pinned / updated by ASF update action
- "boto3-stubs[acm,acm-pca,amplify,apigateway,apigatewayv2,appconfig,appconfigdata,application-autoscaling,appsync,athena,autoscaling,backup,batch,ce,cloudcontrol,cloudformation,cloudfront,cloudtrail,cloudwatch,codebuild,codecommit,codedeploy,codepipeline,codestar-connections,cognito-identity,cognito-idp,dms,docdb,dynamodb,dynamodbstreams,ec2,ecr,ecs,efs,eks,elasticache,elasticbeanstalk,elbv2,emr,emr-serverless,es,events,firehose,fis,glacier,glue,iam,identitystore,iot,iot-data,iotanalytics,iotwireless,kafka,kinesis,kinesisanalytics,kinesisanalyticsv2,kms,lakeformation,lambda,logs,managedblockchain,mediaconvert,mediastore,mq,mwaa,neptune,opensearch,organizations,pi,pipes,pinpoint,qldb,qldb-session,rds,rds-data,redshift,redshift-data,resource-groups,resourcegroupstaggingapi,route53,route53resolver,s3,s3control,sagemaker,sagemaker-runtime,secretsmanager,serverlessrepo,servicediscovery,ses,sesv2,sns,sqs,ssm,sso-admin,stepfunctions,sts,timestream-query,timestream-write,transcribe,wafv2,xray]",
+ "boto3-stubs[acm,acm-pca,amplify,apigateway,apigatewayv2,appconfig,appconfigdata,application-autoscaling,appsync,athena,autoscaling,backup,batch,ce,cloudcontrol,cloudformation,cloudfront,cloudtrail,cloudwatch,codebuild,codecommit,codeconnections,codedeploy,codepipeline,codestar-connections,cognito-identity,cognito-idp,dms,docdb,dynamodb,dynamodbstreams,ec2,ecr,ecs,efs,eks,elasticache,elasticbeanstalk,elbv2,emr,emr-serverless,es,events,firehose,fis,glacier,glue,iam,identitystore,iot,iot-data,iotanalytics,iotwireless,kafka,kinesis,kinesisanalytics,kinesisanalyticsv2,kms,lakeformation,lambda,logs,managedblockchain,mediaconvert,mediastore,mq,mwaa,neptune,opensearch,organizations,pi,pipes,pinpoint,qldb,qldb-session,rds,rds-data,redshift,redshift-data,resource-groups,resourcegroupstaggingapi,route53,route53resolver,s3,s3control,sagemaker,sagemaker-runtime,secretsmanager,serverlessrepo,servicediscovery,ses,sesv2,sns,sqs,ssm,sso-admin,stepfunctions,sts,timestream-query,timestream-write,transcribe,verifiedpermissions,wafv2,xray]",
]
[tool.setuptools]
diff --git a/requirements-base-runtime.txt b/requirements-base-runtime.txt
index 052fb394ca523..4caa10a3d5ef0 100644
--- a/requirements-base-runtime.txt
+++ b/requirements-base-runtime.txt
@@ -9,11 +9,11 @@ attrs==25.3.0
# jsonschema
# localstack-twisted
# referencing
-awscrt==0.25.4
+awscrt==0.26.1
# via localstack-core (pyproject.toml)
-boto3==1.37.18
+boto3==1.38.8
# via localstack-core (pyproject.toml)
-botocore==1.37.18
+botocore==1.38.8
# via
# boto3
# localstack-core (pyproject.toml)
@@ -24,17 +24,17 @@ cachetools==5.5.2
# via localstack-core (pyproject.toml)
cbor2==5.6.5
# via localstack-core (pyproject.toml)
-certifi==2025.1.31
+certifi==2025.4.26
# via requests
cffi==1.17.1
# via cryptography
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
# via requests
click==8.1.8
# via localstack-core (pyproject.toml)
constantly==23.10.4
# via localstack-twisted
-cryptography==44.0.2
+cryptography==44.0.3
# via
# localstack-core (pyproject.toml)
# pyopenssl
@@ -46,7 +46,7 @@ dnspython==2.7.0
# via localstack-core (pyproject.toml)
docker==7.1.0
# via localstack-core (pyproject.toml)
-h11==0.14.0
+h11==0.16.0
# via
# hypercorn
# wsproto
@@ -88,11 +88,11 @@ jsonschema-path==0.3.4
# via
# openapi-core
# openapi-spec-validator
-jsonschema-specifications==2024.10.1
+jsonschema-specifications==2025.4.1
# via
# jsonschema
# openapi-schema-validator
-lazy-object-proxy==1.10.0
+lazy-object-proxy==1.11.0
# via openapi-spec-validator
localstack-twisted==24.3.0
# via localstack-core (pyproject.toml)
@@ -102,7 +102,7 @@ markupsafe==3.0.2
# via werkzeug
mdurl==0.1.2
# via markdown-it-py
-more-itertools==10.6.0
+more-itertools==10.7.0
# via openapi-core
openapi-core==0.19.4
# via localstack-core (pyproject.toml)
@@ -112,7 +112,7 @@ openapi-schema-validator==0.6.3
# openapi-spec-validator
openapi-spec-validator==0.7.1
# via openapi-core
-packaging==24.2
+packaging==25.0
# via build
parse==1.20.2
# via openapi-core
@@ -162,15 +162,15 @@ requests-aws4auth==1.3.1
# via localstack-core (pyproject.toml)
rfc3339-validator==0.1.4
# via openapi-schema-validator
-rich==13.9.4
+rich==14.0.0
# via localstack-core (pyproject.toml)
rolo==0.7.5
# via localstack-core (pyproject.toml)
-rpds-py==0.23.1
+rpds-py==0.24.0
# via
# jsonschema
# referencing
-s3transfer==0.11.4
+s3transfer==0.12.0
# via boto3
semver==3.0.4
# via localstack-core (pyproject.toml)
@@ -180,13 +180,13 @@ six==1.17.0
# rfc3339-validator
tailer==0.4.1
# via localstack-core (pyproject.toml)
-typing-extensions==4.12.2
+typing-extensions==4.13.2
# via
# localstack-twisted
# pyopenssl
# readerwriterlock
# referencing
-urllib3==2.3.0
+urllib3==2.4.0
# via
# botocore
# docker
diff --git a/requirements-basic.txt b/requirements-basic.txt
index d22dc74524e1d..71a4c39b516e3 100644
--- a/requirements-basic.txt
+++ b/requirements-basic.txt
@@ -8,15 +8,15 @@ build==1.2.2.post1
# via localstack-core (pyproject.toml)
cachetools==5.5.2
# via localstack-core (pyproject.toml)
-certifi==2025.1.31
+certifi==2025.4.26
# via requests
cffi==1.17.1
# via cryptography
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
# via requests
click==8.1.8
# via localstack-core (pyproject.toml)
-cryptography==44.0.2
+cryptography==44.0.3
# via localstack-core (pyproject.toml)
dill==0.3.6
# via localstack-core (pyproject.toml)
@@ -30,7 +30,7 @@ markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
-packaging==24.2
+packaging==25.0
# via build
plux==1.12.1
# via localstack-core (pyproject.toml)
@@ -48,11 +48,11 @@ pyyaml==6.0.2
# via localstack-core (pyproject.toml)
requests==2.32.3
# via localstack-core (pyproject.toml)
-rich==13.9.4
+rich==14.0.0
# via localstack-core (pyproject.toml)
semver==3.0.4
# via localstack-core (pyproject.toml)
tailer==0.4.1
# via localstack-core (pyproject.toml)
-urllib3==2.3.0
+urllib3==2.4.0
# via requests
diff --git a/requirements-dev.txt b/requirements-dev.txt
index fef65fabee4bd..13e600fd905cb 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -6,8 +6,6 @@
#
airspeed-ext==0.6.7
# via localstack-core
-amazon-kclpy==3.0.2
- # via localstack-core
annotated-types==0.7.0
# via pydantic
antlr4-python3-runtime==4.13.2
@@ -19,7 +17,7 @@ anyio==4.9.0
apispec==6.8.1
# via localstack-core
argparse==1.4.0
- # via amazon-kclpy
+ # via kclpy-ext
attrs==25.3.0
# via
# cattrs
@@ -27,31 +25,31 @@ attrs==25.3.0
# jsonschema
# localstack-twisted
# referencing
-aws-cdk-asset-awscli-v1==2.2.229
+aws-cdk-asset-awscli-v1==2.2.235
# via aws-cdk-lib
aws-cdk-asset-node-proxy-agent-v6==2.1.0
# via aws-cdk-lib
-aws-cdk-cloud-assembly-schema==40.7.0
+aws-cdk-cloud-assembly-schema==41.2.0
# via aws-cdk-lib
-aws-cdk-lib==2.185.0
+aws-cdk-lib==2.194.0
# via localstack-core
-aws-sam-translator==1.95.0
+aws-sam-translator==1.97.0
# via
# cfn-lint
# localstack-core
aws-xray-sdk==2.14.0
# via moto-ext
-awscli==1.38.18
+awscli==1.40.7
# via localstack-core
-awscrt==0.25.4
+awscrt==0.26.1
# via localstack-core
-boto3==1.37.18
+boto3==1.38.8
# via
- # amazon-kclpy
# aws-sam-translator
+ # kclpy-ext
# localstack-core
# moto-ext
-botocore==1.37.18
+botocore==1.38.8
# via
# aws-xray-sdk
# awscli
@@ -68,11 +66,11 @@ cachetools==5.5.2
# airspeed-ext
# localstack-core
# localstack-core (pyproject.toml)
-cattrs==24.1.2
+cattrs==24.1.3
# via jsii
cbor2==5.6.5
# via localstack-core
-certifi==2025.1.31
+certifi==2025.4.26
# via
# httpcore
# httpx
@@ -82,9 +80,9 @@ cffi==1.17.1
# via cryptography
cfgv==3.4.0
# via pre-commit
-cfn-lint==1.32.0
+cfn-lint==1.34.2
# via moto-ext
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
# via requests
click==8.1.8
# via
@@ -96,15 +94,15 @@ constantly==23.10.4
# via localstack-twisted
constructs==10.4.2
# via aws-cdk-lib
-coverage==7.7.1
+coverage==7.8.0
# via
# coveralls
# localstack-core
coveralls==4.0.1
# via localstack-core (pyproject.toml)
-crontab==1.0.1
+crontab==1.0.4
# via localstack-core
-cryptography==44.0.2
+cryptography==44.0.3
# via
# joserfc
# localstack-core
@@ -140,7 +138,7 @@ docker==7.1.0
# moto-ext
docopt==0.6.2
# via coveralls
-docutils==0.16
+docutils==0.19
# via awscli
events==0.5
# via opensearch-py
@@ -148,7 +146,7 @@ filelock==3.18.0
# via virtualenv
graphql-core==3.2.6
# via moto-ext
-h11==0.14.0
+h11==0.16.0
# via
# httpcore
# hypercorn
@@ -160,7 +158,7 @@ h2==4.2.0
# localstack-twisted
hpack==4.1.0
# via h2
-httpcore==1.0.7
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via localstack-core
@@ -170,7 +168,7 @@ hyperframe==6.1.0
# via h2
hyperlink==21.0.0
# via localstack-twisted
-identify==2.6.9
+identify==2.6.10
# via pre-commit
idna==3.10
# via
@@ -197,14 +195,14 @@ joserfc==1.0.4
# via moto-ext
jpype1-ext==0.0.2
# via localstack-core
-jsii==1.110.0
+jsii==1.111.0
# via
# aws-cdk-asset-awscli-v1
# aws-cdk-asset-node-proxy-agent-v6
# aws-cdk-cloud-assembly-schema
# aws-cdk-lib
# constructs
-json5==0.10.0
+json5==0.12.0
# via localstack-core
jsonpatch==1.33
# via
@@ -230,11 +228,13 @@ jsonschema-path==0.3.4
# via
# openapi-core
# openapi-spec-validator
-jsonschema-specifications==2024.10.1
+jsonschema-specifications==2025.4.1
# via
# jsonschema
# openapi-schema-validator
-lazy-object-proxy==1.10.0
+kclpy-ext==3.0.3
+ # via localstack-core
+lazy-object-proxy==1.11.0
# via openapi-spec-validator
localstack-snapshot==0.2.0
# via localstack-core
@@ -248,14 +248,18 @@ markupsafe==3.0.2
# werkzeug
mdurl==0.1.2
# via markdown-it-py
-more-itertools==10.6.0
+more-itertools==10.7.0
# via openapi-core
-moto-ext==5.1.1.post1
+moto-ext==5.1.4.post1
# via localstack-core
mpmath==1.3.0
# via sympy
multipart==1.2.1
# via moto-ext
+mypy==1.15.0
+ # via localstack-core (pyproject.toml)
+mypy-extensions==1.1.0
+ # via mypy
networkx==3.4.2
# via
# cfn-lint
@@ -275,9 +279,9 @@ openapi-spec-validator==0.7.1
# openapi-core
opensearch-py==2.8.0
# via localstack-core
-orderly-set==5.3.0
+orderly-set==5.4.0
# via deepdiff
-packaging==24.2
+packaging==25.0
# via
# apispec
# build
@@ -331,13 +335,13 @@ pyasn1==0.6.1
# via rsa
pycparser==2.22
# via cffi
-pydantic==2.10.6
+pydantic==2.11.4
# via aws-sam-translator
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
pygments==2.19.1
# via rich
-pymongo==4.11.3
+pymongo==4.12.1
# via localstack-core
pyopenssl==25.0.0
# via
@@ -355,7 +359,7 @@ pytest==8.3.5
# pytest-rerunfailures
# pytest-split
# pytest-tinybird
-pytest-httpserver==1.1.1
+pytest-httpserver==1.1.3
# via localstack-core
pytest-rerunfailures==15.0
# via localstack-core
@@ -411,13 +415,13 @@ responses==0.25.7
# via moto-ext
rfc3339-validator==0.1.4
# via openapi-schema-validator
-rich==13.9.4
+rich==14.0.0
# via
# localstack-core
# localstack-core (pyproject.toml)
rolo==0.7.5
# via localstack-core
-rpds-py==0.23.1
+rpds-py==0.24.0
# via
# jsonschema
# referencing
@@ -425,9 +429,9 @@ rsa==4.7.2
# via awscli
rstr==3.2.2
# via localstack-core (pyproject.toml)
-ruff==0.11.2
+ruff==0.11.8
# via localstack-core (pyproject.toml)
-s3transfer==0.11.4
+s3transfer==0.12.0
# via
# awscli
# boto3
@@ -443,7 +447,7 @@ six==1.17.0
# rfc3339-validator
sniffio==1.3.1
# via anyio
-sympy==1.13.3
+sympy==1.14.0
# via cfn-lint
tailer==0.4.1
# via
@@ -457,19 +461,23 @@ typeguard==2.13.3
# aws-cdk-lib
# constructs
# jsii
-typing-extensions==4.12.2
+typing-extensions==4.13.2
# via
# anyio
# aws-sam-translator
# cfn-lint
# jsii
# localstack-twisted
+ # mypy
# pydantic
# pydantic-core
# pyopenssl
# readerwriterlock
# referencing
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.0
+ # via pydantic
+urllib3==2.4.0
# via
# botocore
# docker
@@ -477,7 +485,7 @@ urllib3==2.3.0
# opensearch-py
# requests
# responses
-virtualenv==20.29.3
+virtualenv==20.31.1
# via pre-commit
websocket-client==1.8.0
# via localstack-core
diff --git a/requirements-runtime.txt b/requirements-runtime.txt
index a75269182aedd..37cbf4908e40c 100644
--- a/requirements-runtime.txt
+++ b/requirements-runtime.txt
@@ -6,8 +6,6 @@
#
airspeed-ext==0.6.7
# via localstack-core (pyproject.toml)
-amazon-kclpy==3.0.2
- # via localstack-core (pyproject.toml)
annotated-types==0.7.0
# via pydantic
antlr4-python3-runtime==4.13.2
@@ -17,29 +15,29 @@ antlr4-python3-runtime==4.13.2
apispec==6.8.1
# via localstack-core (pyproject.toml)
argparse==1.4.0
- # via amazon-kclpy
+ # via kclpy-ext
attrs==25.3.0
# via
# jsonschema
# localstack-twisted
# referencing
-aws-sam-translator==1.95.0
+aws-sam-translator==1.97.0
# via
# cfn-lint
# localstack-core (pyproject.toml)
aws-xray-sdk==2.14.0
# via moto-ext
-awscli==1.38.18
+awscli==1.40.7
# via localstack-core (pyproject.toml)
-awscrt==0.25.4
+awscrt==0.26.1
# via localstack-core
-boto3==1.37.18
+boto3==1.38.8
# via
- # amazon-kclpy
# aws-sam-translator
+ # kclpy-ext
# localstack-core
# moto-ext
-botocore==1.37.18
+botocore==1.38.8
# via
# aws-xray-sdk
# awscli
@@ -58,15 +56,15 @@ cachetools==5.5.2
# localstack-core (pyproject.toml)
cbor2==5.6.5
# via localstack-core
-certifi==2025.1.31
+certifi==2025.4.26
# via
# opensearch-py
# requests
cffi==1.17.1
# via cryptography
-cfn-lint==1.32.0
+cfn-lint==1.34.2
# via moto-ext
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
# via requests
click==8.1.8
# via
@@ -76,9 +74,9 @@ colorama==0.4.6
# via awscli
constantly==23.10.4
# via localstack-twisted
-crontab==1.0.1
+crontab==1.0.4
# via localstack-core (pyproject.toml)
-cryptography==44.0.2
+cryptography==44.0.3
# via
# joserfc
# localstack-core
@@ -104,13 +102,13 @@ docker==7.1.0
# via
# localstack-core
# moto-ext
-docutils==0.16
+docutils==0.19
# via awscli
events==0.5
# via opensearch-py
graphql-core==3.2.6
# via moto-ext
-h11==0.14.0
+h11==0.16.0
# via
# hypercorn
# wsproto
@@ -145,7 +143,7 @@ joserfc==1.0.4
# via moto-ext
jpype1-ext==0.0.2
# via localstack-core (pyproject.toml)
-json5==0.10.0
+json5==0.12.0
# via localstack-core (pyproject.toml)
jsonpatch==1.33
# via
@@ -170,11 +168,13 @@ jsonschema-path==0.3.4
# via
# openapi-core
# openapi-spec-validator
-jsonschema-specifications==2024.10.1
+jsonschema-specifications==2025.4.1
# via
# jsonschema
# openapi-schema-validator
-lazy-object-proxy==1.10.0
+kclpy-ext==3.0.3
+ # via localstack-core (pyproject.toml)
+lazy-object-proxy==1.11.0
# via openapi-spec-validator
localstack-twisted==24.3.0
# via localstack-core
@@ -186,9 +186,9 @@ markupsafe==3.0.2
# werkzeug
mdurl==0.1.2
# via markdown-it-py
-more-itertools==10.6.0
+more-itertools==10.7.0
# via openapi-core
-moto-ext==5.1.1.post1
+moto-ext==5.1.4.post1
# via localstack-core (pyproject.toml)
mpmath==1.3.0
# via sympy
@@ -208,7 +208,7 @@ openapi-spec-validator==0.7.1
# openapi-core
opensearch-py==2.8.0
# via localstack-core (pyproject.toml)
-packaging==24.2
+packaging==25.0
# via
# apispec
# build
@@ -239,13 +239,13 @@ pyasn1==0.6.1
# via rsa
pycparser==2.22
# via cffi
-pydantic==2.10.6
+pydantic==2.11.4
# via aws-sam-translator
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
pygments==2.19.1
# via rich
-pymongo==4.11.3
+pymongo==4.12.1
# via localstack-core (pyproject.toml)
pyopenssl==25.0.0
# via
@@ -300,19 +300,19 @@ responses==0.25.7
# via moto-ext
rfc3339-validator==0.1.4
# via openapi-schema-validator
-rich==13.9.4
+rich==14.0.0
# via
# localstack-core
# localstack-core (pyproject.toml)
rolo==0.7.5
# via localstack-core
-rpds-py==0.23.1
+rpds-py==0.24.0
# via
# jsonschema
# referencing
rsa==4.7.2
# via awscli
-s3transfer==0.11.4
+s3transfer==0.12.0
# via
# awscli
# boto3
@@ -326,13 +326,13 @@ six==1.17.0
# jsonpath-rw
# python-dateutil
# rfc3339-validator
-sympy==1.13.3
+sympy==1.14.0
# via cfn-lint
tailer==0.4.1
# via
# localstack-core
# localstack-core (pyproject.toml)
-typing-extensions==4.12.2
+typing-extensions==4.13.2
# via
# aws-sam-translator
# cfn-lint
@@ -342,7 +342,10 @@ typing-extensions==4.12.2
# pyopenssl
# readerwriterlock
# referencing
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.0
+ # via pydantic
+urllib3==2.4.0
# via
# botocore
# docker
diff --git a/requirements-test.txt b/requirements-test.txt
index 703ff7f52941e..eeb774517342f 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -6,8 +6,6 @@
#
airspeed-ext==0.6.7
# via localstack-core
-amazon-kclpy==3.0.2
- # via localstack-core
annotated-types==0.7.0
# via pydantic
antlr4-python3-runtime==4.13.2
@@ -19,7 +17,7 @@ anyio==4.9.0
apispec==6.8.1
# via localstack-core
argparse==1.4.0
- # via amazon-kclpy
+ # via kclpy-ext
attrs==25.3.0
# via
# cattrs
@@ -27,31 +25,31 @@ attrs==25.3.0
# jsonschema
# localstack-twisted
# referencing
-aws-cdk-asset-awscli-v1==2.2.229
+aws-cdk-asset-awscli-v1==2.2.235
# via aws-cdk-lib
aws-cdk-asset-node-proxy-agent-v6==2.1.0
# via aws-cdk-lib
-aws-cdk-cloud-assembly-schema==40.7.0
+aws-cdk-cloud-assembly-schema==41.2.0
# via aws-cdk-lib
-aws-cdk-lib==2.185.0
+aws-cdk-lib==2.194.0
# via localstack-core (pyproject.toml)
-aws-sam-translator==1.95.0
+aws-sam-translator==1.97.0
# via
# cfn-lint
# localstack-core
aws-xray-sdk==2.14.0
# via moto-ext
-awscli==1.38.18
+awscli==1.40.7
# via localstack-core
-awscrt==0.25.4
+awscrt==0.26.1
# via localstack-core
-boto3==1.37.18
+boto3==1.38.8
# via
- # amazon-kclpy
# aws-sam-translator
+ # kclpy-ext
# localstack-core
# moto-ext
-botocore==1.37.18
+botocore==1.38.8
# via
# aws-xray-sdk
# awscli
@@ -68,11 +66,11 @@ cachetools==5.5.2
# airspeed-ext
# localstack-core
# localstack-core (pyproject.toml)
-cattrs==24.1.2
+cattrs==24.1.3
# via jsii
cbor2==5.6.5
# via localstack-core
-certifi==2025.1.31
+certifi==2025.4.26
# via
# httpcore
# httpx
@@ -80,9 +78,9 @@ certifi==2025.1.31
# requests
cffi==1.17.1
# via cryptography
-cfn-lint==1.32.0
+cfn-lint==1.34.2
# via moto-ext
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
# via requests
click==8.1.8
# via
@@ -94,11 +92,11 @@ constantly==23.10.4
# via localstack-twisted
constructs==10.4.2
# via aws-cdk-lib
-coverage==7.7.1
+coverage==7.8.0
# via localstack-core (pyproject.toml)
-crontab==1.0.1
+crontab==1.0.4
# via localstack-core
-cryptography==44.0.2
+cryptography==44.0.3
# via
# joserfc
# localstack-core
@@ -128,13 +126,13 @@ docker==7.1.0
# via
# localstack-core
# moto-ext
-docutils==0.16
+docutils==0.19
# via awscli
events==0.5
# via opensearch-py
graphql-core==3.2.6
# via moto-ext
-h11==0.14.0
+h11==0.16.0
# via
# httpcore
# hypercorn
@@ -146,7 +144,7 @@ h2==4.2.0
# localstack-twisted
hpack==4.1.0
# via h2
-httpcore==1.0.7
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via localstack-core (pyproject.toml)
@@ -181,14 +179,14 @@ joserfc==1.0.4
# via moto-ext
jpype1-ext==0.0.2
# via localstack-core
-jsii==1.110.0
+jsii==1.111.0
# via
# aws-cdk-asset-awscli-v1
# aws-cdk-asset-node-proxy-agent-v6
# aws-cdk-cloud-assembly-schema
# aws-cdk-lib
# constructs
-json5==0.10.0
+json5==0.12.0
# via localstack-core
jsonpatch==1.33
# via
@@ -214,11 +212,13 @@ jsonschema-path==0.3.4
# via
# openapi-core
# openapi-spec-validator
-jsonschema-specifications==2024.10.1
+jsonschema-specifications==2025.4.1
# via
# jsonschema
# openapi-schema-validator
-lazy-object-proxy==1.10.0
+kclpy-ext==3.0.3
+ # via localstack-core
+lazy-object-proxy==1.11.0
# via openapi-spec-validator
localstack-snapshot==0.2.0
# via localstack-core (pyproject.toml)
@@ -232,9 +232,9 @@ markupsafe==3.0.2
# werkzeug
mdurl==0.1.2
# via markdown-it-py
-more-itertools==10.6.0
+more-itertools==10.7.0
# via openapi-core
-moto-ext==5.1.1.post1
+moto-ext==5.1.4.post1
# via localstack-core
mpmath==1.3.0
# via sympy
@@ -254,9 +254,9 @@ openapi-spec-validator==0.7.1
# openapi-core
opensearch-py==2.8.0
# via localstack-core
-orderly-set==5.3.0
+orderly-set==5.4.0
# via deepdiff
-packaging==24.2
+packaging==25.0
# via
# apispec
# build
@@ -301,13 +301,13 @@ pyasn1==0.6.1
# via rsa
pycparser==2.22
# via cffi
-pydantic==2.10.6
+pydantic==2.11.4
# via aws-sam-translator
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
pygments==2.19.1
# via rich
-pymongo==4.11.3
+pymongo==4.12.1
# via localstack-core
pyopenssl==25.0.0
# via
@@ -323,7 +323,7 @@ pytest==8.3.5
# pytest-rerunfailures
# pytest-split
# pytest-tinybird
-pytest-httpserver==1.1.1
+pytest-httpserver==1.1.3
# via localstack-core (pyproject.toml)
pytest-rerunfailures==15.0
# via localstack-core (pyproject.toml)
@@ -377,19 +377,19 @@ responses==0.25.7
# via moto-ext
rfc3339-validator==0.1.4
# via openapi-schema-validator
-rich==13.9.4
+rich==14.0.0
# via
# localstack-core
# localstack-core (pyproject.toml)
rolo==0.7.5
# via localstack-core
-rpds-py==0.23.1
+rpds-py==0.24.0
# via
# jsonschema
# referencing
rsa==4.7.2
# via awscli
-s3transfer==0.11.4
+s3transfer==0.12.0
# via
# awscli
# boto3
@@ -405,7 +405,7 @@ six==1.17.0
# rfc3339-validator
sniffio==1.3.1
# via anyio
-sympy==1.13.3
+sympy==1.14.0
# via cfn-lint
tailer==0.4.1
# via
@@ -419,7 +419,7 @@ typeguard==2.13.3
# aws-cdk-lib
# constructs
# jsii
-typing-extensions==4.12.2
+typing-extensions==4.13.2
# via
# anyio
# aws-sam-translator
@@ -431,7 +431,10 @@ typing-extensions==4.12.2
# pyopenssl
# readerwriterlock
# referencing
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.0
+ # via pydantic
+urllib3==2.4.0
# via
# botocore
# docker
diff --git a/requirements-typehint.txt b/requirements-typehint.txt
index 61fb83ea593c0..b194cae94ca68 100644
--- a/requirements-typehint.txt
+++ b/requirements-typehint.txt
@@ -6,8 +6,6 @@
#
airspeed-ext==0.6.7
# via localstack-core
-amazon-kclpy==3.0.2
- # via localstack-core
annotated-types==0.7.0
# via pydantic
antlr4-python3-runtime==4.13.2
@@ -19,7 +17,7 @@ anyio==4.9.0
apispec==6.8.1
# via localstack-core
argparse==1.4.0
- # via amazon-kclpy
+ # via kclpy-ext
attrs==25.3.0
# via
# cattrs
@@ -27,33 +25,33 @@ attrs==25.3.0
# jsonschema
# localstack-twisted
# referencing
-aws-cdk-asset-awscli-v1==2.2.229
+aws-cdk-asset-awscli-v1==2.2.235
# via aws-cdk-lib
aws-cdk-asset-node-proxy-agent-v6==2.1.0
# via aws-cdk-lib
-aws-cdk-cloud-assembly-schema==40.7.0
+aws-cdk-cloud-assembly-schema==41.2.0
# via aws-cdk-lib
-aws-cdk-lib==2.185.0
+aws-cdk-lib==2.194.0
# via localstack-core
-aws-sam-translator==1.95.0
+aws-sam-translator==1.97.0
# via
# cfn-lint
# localstack-core
aws-xray-sdk==2.14.0
# via moto-ext
-awscli==1.38.18
+awscli==1.40.7
# via localstack-core
-awscrt==0.25.4
+awscrt==0.26.1
# via localstack-core
-boto3==1.37.18
+boto3==1.38.8
# via
- # amazon-kclpy
# aws-sam-translator
+ # kclpy-ext
# localstack-core
# moto-ext
-boto3-stubs==1.37.19
+boto3-stubs==1.38.9
# via localstack-core (pyproject.toml)
-botocore==1.37.18
+botocore==1.38.8
# via
# aws-xray-sdk
# awscli
@@ -61,7 +59,7 @@ botocore==1.37.18
# localstack-core
# moto-ext
# s3transfer
-botocore-stubs==1.37.19
+botocore-stubs==1.38.9
# via boto3-stubs
build==1.2.2.post1
# via
@@ -72,11 +70,11 @@ cachetools==5.5.2
# airspeed-ext
# localstack-core
# localstack-core (pyproject.toml)
-cattrs==24.1.2
+cattrs==24.1.3
# via jsii
cbor2==5.6.5
# via localstack-core
-certifi==2025.1.31
+certifi==2025.4.26
# via
# httpcore
# httpx
@@ -86,9 +84,9 @@ cffi==1.17.1
# via cryptography
cfgv==3.4.0
# via pre-commit
-cfn-lint==1.32.0
+cfn-lint==1.34.2
# via moto-ext
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
# via requests
click==8.1.8
# via
@@ -100,15 +98,15 @@ constantly==23.10.4
# via localstack-twisted
constructs==10.4.2
# via aws-cdk-lib
-coverage==7.7.1
+coverage==7.8.0
# via
# coveralls
# localstack-core
coveralls==4.0.1
# via localstack-core
-crontab==1.0.1
+crontab==1.0.4
# via localstack-core
-cryptography==44.0.2
+cryptography==44.0.3
# via
# joserfc
# localstack-core
@@ -144,7 +142,7 @@ docker==7.1.0
# moto-ext
docopt==0.6.2
# via coveralls
-docutils==0.16
+docutils==0.19
# via awscli
events==0.5
# via opensearch-py
@@ -152,7 +150,7 @@ filelock==3.18.0
# via virtualenv
graphql-core==3.2.6
# via moto-ext
-h11==0.14.0
+h11==0.16.0
# via
# httpcore
# hypercorn
@@ -164,7 +162,7 @@ h2==4.2.0
# localstack-twisted
hpack==4.1.0
# via h2
-httpcore==1.0.7
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via localstack-core
@@ -174,7 +172,7 @@ hyperframe==6.1.0
# via h2
hyperlink==21.0.0
# via localstack-twisted
-identify==2.6.9
+identify==2.6.10
# via pre-commit
idna==3.10
# via
@@ -201,14 +199,14 @@ joserfc==1.0.4
# via moto-ext
jpype1-ext==0.0.2
# via localstack-core
-jsii==1.110.0
+jsii==1.111.0
# via
# aws-cdk-asset-awscli-v1
# aws-cdk-asset-node-proxy-agent-v6
# aws-cdk-cloud-assembly-schema
# aws-cdk-lib
# constructs
-json5==0.10.0
+json5==0.12.0
# via localstack-core
jsonpatch==1.33
# via
@@ -234,11 +232,13 @@ jsonschema-path==0.3.4
# via
# openapi-core
# openapi-spec-validator
-jsonschema-specifications==2024.10.1
+jsonschema-specifications==2025.4.1
# via
# jsonschema
# openapi-schema-validator
-lazy-object-proxy==1.10.0
+kclpy-ext==3.0.3
+ # via localstack-core
+lazy-object-proxy==1.11.0
# via openapi-spec-validator
localstack-snapshot==0.2.0
# via localstack-core
@@ -252,216 +252,224 @@ markupsafe==3.0.2
# werkzeug
mdurl==0.1.2
# via markdown-it-py
-more-itertools==10.6.0
+more-itertools==10.7.0
# via openapi-core
-moto-ext==5.1.1.post1
+moto-ext==5.1.4.post1
# via localstack-core
mpmath==1.3.0
# via sympy
multipart==1.2.1
# via moto-ext
-mypy-boto3-acm==1.37.0
+mypy==1.15.0
+ # via localstack-core
+mypy-boto3-acm==1.38.4
# via boto3-stubs
-mypy-boto3-acm-pca==1.37.12
+mypy-boto3-acm-pca==1.38.0
# via boto3-stubs
-mypy-boto3-amplify==1.37.17
+mypy-boto3-amplify==1.38.0
# via boto3-stubs
-mypy-boto3-apigateway==1.37.0
+mypy-boto3-apigateway==1.38.0
# via boto3-stubs
-mypy-boto3-apigatewayv2==1.37.0
+mypy-boto3-apigatewayv2==1.38.0
# via boto3-stubs
-mypy-boto3-appconfig==1.37.0
+mypy-boto3-appconfig==1.38.7
# via boto3-stubs
-mypy-boto3-appconfigdata==1.37.0
+mypy-boto3-appconfigdata==1.38.0
# via boto3-stubs
-mypy-boto3-application-autoscaling==1.37.0
+mypy-boto3-application-autoscaling==1.38.0
# via boto3-stubs
-mypy-boto3-appsync==1.37.15
+mypy-boto3-appsync==1.38.2
# via boto3-stubs
-mypy-boto3-athena==1.37.0
+mypy-boto3-athena==1.38.0
# via boto3-stubs
-mypy-boto3-autoscaling==1.37.0
+mypy-boto3-autoscaling==1.38.0
# via boto3-stubs
-mypy-boto3-backup==1.37.0
+mypy-boto3-backup==1.38.0
# via boto3-stubs
-mypy-boto3-batch==1.37.2
+mypy-boto3-batch==1.38.0
# via boto3-stubs
-mypy-boto3-ce==1.37.10
+mypy-boto3-ce==1.38.0
# via boto3-stubs
-mypy-boto3-cloudcontrol==1.37.0
+mypy-boto3-cloudcontrol==1.38.0
# via boto3-stubs
-mypy-boto3-cloudformation==1.37.0
+mypy-boto3-cloudformation==1.38.0
# via boto3-stubs
-mypy-boto3-cloudfront==1.37.9
+mypy-boto3-cloudfront==1.38.4
# via boto3-stubs
-mypy-boto3-cloudtrail==1.37.8
+mypy-boto3-cloudtrail==1.38.0
# via boto3-stubs
-mypy-boto3-cloudwatch==1.37.0
+mypy-boto3-cloudwatch==1.38.0
# via boto3-stubs
-mypy-boto3-codebuild==1.37.12
+mypy-boto3-codebuild==1.38.2
# via boto3-stubs
-mypy-boto3-codecommit==1.37.0
+mypy-boto3-codecommit==1.38.0
# via boto3-stubs
-mypy-boto3-codedeploy==1.37.0
+mypy-boto3-codeconnections==1.38.0
# via boto3-stubs
-mypy-boto3-codepipeline==1.37.0
+mypy-boto3-codedeploy==1.38.0
# via boto3-stubs
-mypy-boto3-codestar-connections==1.37.0
+mypy-boto3-codepipeline==1.38.0
# via boto3-stubs
-mypy-boto3-cognito-identity==1.37.13
+mypy-boto3-codestar-connections==1.38.0
# via boto3-stubs
-mypy-boto3-cognito-idp==1.37.13.post1
+mypy-boto3-cognito-identity==1.38.0
# via boto3-stubs
-mypy-boto3-dms==1.37.4
+mypy-boto3-cognito-idp==1.38.0
# via boto3-stubs
-mypy-boto3-docdb==1.37.0
+mypy-boto3-dms==1.38.0
# via boto3-stubs
-mypy-boto3-dynamodb==1.37.12
+mypy-boto3-docdb==1.38.0
# via boto3-stubs
-mypy-boto3-dynamodbstreams==1.37.0
+mypy-boto3-dynamodb==1.38.4
# via boto3-stubs
-mypy-boto3-ec2==1.37.16
+mypy-boto3-dynamodbstreams==1.38.0
# via boto3-stubs
-mypy-boto3-ecr==1.37.11
+mypy-boto3-ec2==1.38.9
# via boto3-stubs
-mypy-boto3-ecs==1.37.11
+mypy-boto3-ecr==1.38.6
# via boto3-stubs
-mypy-boto3-efs==1.37.0
+mypy-boto3-ecs==1.38.9
# via boto3-stubs
-mypy-boto3-eks==1.37.4
+mypy-boto3-efs==1.38.0
# via boto3-stubs
-mypy-boto3-elasticache==1.37.6
+mypy-boto3-eks==1.38.0
# via boto3-stubs
-mypy-boto3-elasticbeanstalk==1.37.0
+mypy-boto3-elasticache==1.38.0
# via boto3-stubs
-mypy-boto3-elbv2==1.37.9
+mypy-boto3-elasticbeanstalk==1.38.0
# via boto3-stubs
-mypy-boto3-emr==1.37.3
+mypy-boto3-elbv2==1.38.0
# via boto3-stubs
-mypy-boto3-emr-serverless==1.37.0
+mypy-boto3-emr==1.38.0
# via boto3-stubs
-mypy-boto3-es==1.37.0
+mypy-boto3-emr-serverless==1.38.0
# via boto3-stubs
-mypy-boto3-events==1.37.0
+mypy-boto3-es==1.38.0
# via boto3-stubs
-mypy-boto3-firehose==1.37.0
+mypy-boto3-events==1.38.0
# via boto3-stubs
-mypy-boto3-fis==1.37.0
+mypy-boto3-firehose==1.38.0
# via boto3-stubs
-mypy-boto3-glacier==1.37.0
+mypy-boto3-fis==1.38.0
# via boto3-stubs
-mypy-boto3-glue==1.37.13
+mypy-boto3-glacier==1.38.0
# via boto3-stubs
-mypy-boto3-iam==1.37.0
+mypy-boto3-glue==1.38.0
# via boto3-stubs
-mypy-boto3-identitystore==1.37.0
+mypy-boto3-iam==1.38.0
# via boto3-stubs
-mypy-boto3-iot==1.37.1
+mypy-boto3-identitystore==1.38.0
# via boto3-stubs
-mypy-boto3-iot-data==1.37.0
+mypy-boto3-iot==1.38.0
# via boto3-stubs
-mypy-boto3-iotanalytics==1.37.0
+mypy-boto3-iot-data==1.38.0
# via boto3-stubs
-mypy-boto3-iotwireless==1.37.19
+mypy-boto3-iotanalytics==1.38.0
# via boto3-stubs
-mypy-boto3-kafka==1.37.0
+mypy-boto3-iotwireless==1.38.0
# via boto3-stubs
-mypy-boto3-kinesis==1.37.0
+mypy-boto3-kafka==1.38.0
# via boto3-stubs
-mypy-boto3-kinesisanalytics==1.37.0
+mypy-boto3-kinesis==1.38.8
# via boto3-stubs
-mypy-boto3-kinesisanalyticsv2==1.37.0
+mypy-boto3-kinesisanalytics==1.38.0
# via boto3-stubs
-mypy-boto3-kms==1.37.0
+mypy-boto3-kinesisanalyticsv2==1.38.0
# via boto3-stubs
-mypy-boto3-lakeformation==1.37.13
+mypy-boto3-kms==1.38.0
# via boto3-stubs
-mypy-boto3-lambda==1.37.16
+mypy-boto3-lakeformation==1.38.0
# via boto3-stubs
-mypy-boto3-logs==1.37.12
+mypy-boto3-lambda==1.38.0
# via boto3-stubs
-mypy-boto3-managedblockchain==1.37.0
+mypy-boto3-logs==1.38.6
# via boto3-stubs
-mypy-boto3-mediaconvert==1.37.15
+mypy-boto3-managedblockchain==1.38.0
# via boto3-stubs
-mypy-boto3-mediastore==1.37.0
+mypy-boto3-mediaconvert==1.38.9
# via boto3-stubs
-mypy-boto3-mq==1.37.0
+mypy-boto3-mediastore==1.38.0
# via boto3-stubs
-mypy-boto3-mwaa==1.37.0
+mypy-boto3-mq==1.38.0
# via boto3-stubs
-mypy-boto3-neptune==1.37.0
+mypy-boto3-mwaa==1.38.0
# via boto3-stubs
-mypy-boto3-opensearch==1.37.0
+mypy-boto3-neptune==1.38.0
# via boto3-stubs
-mypy-boto3-organizations==1.37.0
+mypy-boto3-opensearch==1.38.0
# via boto3-stubs
-mypy-boto3-pi==1.37.0
+mypy-boto3-organizations==1.38.0
# via boto3-stubs
-mypy-boto3-pinpoint==1.37.0
+mypy-boto3-pi==1.38.0
# via boto3-stubs
-mypy-boto3-pipes==1.37.0
+mypy-boto3-pinpoint==1.38.0
# via boto3-stubs
-mypy-boto3-qldb==1.37.0
+mypy-boto3-pipes==1.38.0
# via boto3-stubs
-mypy-boto3-qldb-session==1.37.0
+mypy-boto3-qldb==1.38.0
# via boto3-stubs
-mypy-boto3-rds==1.37.6
+mypy-boto3-qldb-session==1.38.0
# via boto3-stubs
-mypy-boto3-rds-data==1.37.0
+mypy-boto3-rds==1.38.2
# via boto3-stubs
-mypy-boto3-redshift==1.37.0
+mypy-boto3-rds-data==1.38.0
# via boto3-stubs
-mypy-boto3-redshift-data==1.37.8
+mypy-boto3-redshift==1.38.0
# via boto3-stubs
-mypy-boto3-resource-groups==1.37.0
+mypy-boto3-redshift-data==1.38.0
# via boto3-stubs
-mypy-boto3-resourcegroupstaggingapi==1.37.0
+mypy-boto3-resource-groups==1.38.0
# via boto3-stubs
-mypy-boto3-route53==1.37.15
+mypy-boto3-resourcegroupstaggingapi==1.38.0
# via boto3-stubs
-mypy-boto3-route53resolver==1.37.0
+mypy-boto3-route53==1.38.0
# via boto3-stubs
-mypy-boto3-s3==1.37.0
+mypy-boto3-route53resolver==1.38.0
# via boto3-stubs
-mypy-boto3-s3control==1.37.12
+mypy-boto3-s3==1.38.0
# via boto3-stubs
-mypy-boto3-sagemaker==1.37.18
+mypy-boto3-s3control==1.38.0
# via boto3-stubs
-mypy-boto3-sagemaker-runtime==1.37.0
+mypy-boto3-sagemaker==1.38.7
# via boto3-stubs
-mypy-boto3-secretsmanager==1.37.0
+mypy-boto3-sagemaker-runtime==1.38.0
# via boto3-stubs
-mypy-boto3-serverlessrepo==1.37.0
+mypy-boto3-secretsmanager==1.38.0
# via boto3-stubs
-mypy-boto3-servicediscovery==1.37.0
+mypy-boto3-serverlessrepo==1.38.0
# via boto3-stubs
-mypy-boto3-ses==1.37.0
+mypy-boto3-servicediscovery==1.38.0
# via boto3-stubs
-mypy-boto3-sesv2==1.37.0
+mypy-boto3-ses==1.38.0
# via boto3-stubs
-mypy-boto3-sns==1.37.0
+mypy-boto3-sesv2==1.38.0
# via boto3-stubs
-mypy-boto3-sqs==1.37.0
+mypy-boto3-sns==1.38.0
# via boto3-stubs
-mypy-boto3-ssm==1.37.19
+mypy-boto3-sqs==1.38.0
# via boto3-stubs
-mypy-boto3-sso-admin==1.37.0
+mypy-boto3-ssm==1.38.5
# via boto3-stubs
-mypy-boto3-stepfunctions==1.37.0
+mypy-boto3-sso-admin==1.38.0
# via boto3-stubs
-mypy-boto3-sts==1.37.0
+mypy-boto3-stepfunctions==1.38.0
# via boto3-stubs
-mypy-boto3-timestream-query==1.37.0
+mypy-boto3-sts==1.38.0
# via boto3-stubs
-mypy-boto3-timestream-write==1.37.0
+mypy-boto3-timestream-query==1.38.0
# via boto3-stubs
-mypy-boto3-transcribe==1.37.5
+mypy-boto3-timestream-write==1.38.0
# via boto3-stubs
-mypy-boto3-wafv2==1.37.14
+mypy-boto3-transcribe==1.38.0
# via boto3-stubs
-mypy-boto3-xray==1.37.0
+mypy-boto3-verifiedpermissions==1.38.7
# via boto3-stubs
+mypy-boto3-wafv2==1.38.0
+ # via boto3-stubs
+mypy-boto3-xray==1.38.0
+ # via boto3-stubs
+mypy-extensions==1.1.0
+ # via mypy
networkx==3.4.2
# via
# cfn-lint
@@ -481,9 +489,9 @@ openapi-spec-validator==0.7.1
# openapi-core
opensearch-py==2.8.0
# via localstack-core
-orderly-set==5.3.0
+orderly-set==5.4.0
# via deepdiff
-packaging==24.2
+packaging==25.0
# via
# apispec
# build
@@ -537,13 +545,13 @@ pyasn1==0.6.1
# via rsa
pycparser==2.22
# via cffi
-pydantic==2.10.6
+pydantic==2.11.4
# via aws-sam-translator
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
pygments==2.19.1
# via rich
-pymongo==4.11.3
+pymongo==4.12.1
# via localstack-core
pyopenssl==25.0.0
# via
@@ -561,7 +569,7 @@ pytest==8.3.5
# pytest-rerunfailures
# pytest-split
# pytest-tinybird
-pytest-httpserver==1.1.1
+pytest-httpserver==1.1.3
# via localstack-core
pytest-rerunfailures==15.0
# via localstack-core
@@ -617,13 +625,13 @@ responses==0.25.7
# via moto-ext
rfc3339-validator==0.1.4
# via openapi-schema-validator
-rich==13.9.4
+rich==14.0.0
# via
# localstack-core
# localstack-core (pyproject.toml)
rolo==0.7.5
# via localstack-core
-rpds-py==0.23.1
+rpds-py==0.24.0
# via
# jsonschema
# referencing
@@ -631,9 +639,9 @@ rsa==4.7.2
# via awscli
rstr==3.2.2
# via localstack-core
-ruff==0.11.2
+ruff==0.11.8
# via localstack-core
-s3transfer==0.11.4
+s3transfer==0.12.0
# via
# awscli
# boto3
@@ -649,7 +657,7 @@ six==1.17.0
# rfc3339-validator
sniffio==1.3.1
# via anyio
-sympy==1.13.3
+sympy==1.14.0
# via cfn-lint
tailer==0.4.1
# via
@@ -663,11 +671,11 @@ typeguard==2.13.3
# aws-cdk-lib
# constructs
# jsii
-types-awscrt==0.24.2
+types-awscrt==0.26.1
# via botocore-stubs
-types-s3transfer==0.11.4
+types-s3transfer==0.12.0
# via boto3-stubs
-typing-extensions==4.12.2
+typing-extensions==4.13.2
# via
# anyio
# aws-sam-translator
@@ -675,6 +683,7 @@ typing-extensions==4.12.2
# cfn-lint
# jsii
# localstack-twisted
+ # mypy
# mypy-boto3-acm
# mypy-boto3-acm-pca
# mypy-boto3-amplify
@@ -696,6 +705,7 @@ typing-extensions==4.12.2
# mypy-boto3-cloudwatch
# mypy-boto3-codebuild
# mypy-boto3-codecommit
+ # mypy-boto3-codeconnections
# mypy-boto3-codedeploy
# mypy-boto3-codepipeline
# mypy-boto3-codestar-connections
@@ -774,6 +784,7 @@ typing-extensions==4.12.2
# mypy-boto3-timestream-query
# mypy-boto3-timestream-write
# mypy-boto3-transcribe
+ # mypy-boto3-verifiedpermissions
# mypy-boto3-wafv2
# mypy-boto3-xray
# pydantic
@@ -781,7 +792,10 @@ typing-extensions==4.12.2
# pyopenssl
# readerwriterlock
# referencing
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.0
+ # via pydantic
+urllib3==2.4.0
# via
# botocore
# docker
@@ -789,7 +803,7 @@ urllib3==2.3.0
# opensearch-py
# requests
# responses
-virtualenv==20.29.3
+virtualenv==20.31.1
# via pre-commit
websocket-client==1.8.0
# via localstack-core
diff --git a/tests/aws/conftest.py b/tests/aws/conftest.py
index c5c5612b76cef..3292bc6523de5 100644
--- a/tests/aws/conftest.py
+++ b/tests/aws/conftest.py
@@ -8,7 +8,6 @@
from localstack import config as localstack_config
from localstack import constants
-from localstack.testing.scenario.provisioning import InfraProvisioner
from localstack.testing.snapshots.transformer_utility import (
SNAPSHOT_BASIC_TRANSFORMER,
SNAPSHOT_BASIC_TRANSFORMER_NEW,
@@ -85,6 +84,9 @@ def cdk_template_path():
# Note: Don't move this into testing lib
@pytest.fixture(scope="session")
def infrastructure_setup(cdk_template_path, aws_client):
+ # Note: the import needs to be local to avoid importing the CDK on every test run, which takes quite some time
+ from localstack.testing.scenario.provisioning import InfraProvisioner
+
def _infrastructure_setup(
namespace: str, force_synth: Optional[bool] = False
) -> InfraProvisioner:
diff --git a/tests/aws/files/pets.json b/tests/aws/files/pets.json
index 1965dd545a253..0e4f769ea277c 100644
--- a/tests/aws/files/pets.json
+++ b/tests/aws/files/pets.json
@@ -7,6 +7,10 @@
"schemes": [
"https"
],
+ "x-amazon-apigateway-binary-media-types": [
+ "image/png",
+ "image/jpg"
+ ],
"paths": {
"/pets": {
"get": {
diff --git a/tests/aws/services/apigateway/apigateway_fixtures.py b/tests/aws/services/apigateway/apigateway_fixtures.py
index 0c0b549032df0..e7d58b40c5ba2 100644
--- a/tests/aws/services/apigateway/apigateway_fixtures.py
+++ b/tests/aws/services/apigateway/apigateway_fixtures.py
@@ -35,74 +35,24 @@ def import_rest_api(apigateway_client, **kwargs):
return response, root_id
-def get_rest_api(apigateway_client, **kwargs):
- response = apigateway_client.get_rest_api(**kwargs)
- assert_response_is_200(response)
- return response.get("id"), response.get("name")
-
-
-def put_rest_api(apigateway_client, **kwargs):
- response = apigateway_client.put_rest_api(**kwargs)
- assert_response_is_200(response)
- return response.get("id"), response.get("name")
-
-
-def get_rest_apis(apigateway_client, **kwargs):
- response = apigateway_client.get_rest_apis(**kwargs)
- assert_response_is_200(response)
- return response.get("items")
-
-
-def delete_rest_api(apigateway_client, **kwargs):
- response = apigateway_client.delete_rest_api(**kwargs)
- assert_response_status(response, 202)
-
-
def create_rest_resource(apigateway_client, **kwargs):
response = apigateway_client.create_resource(**kwargs)
assert_response_is_201(response)
return response.get("id"), response.get("parentId")
-def delete_rest_resource(apigateway_client, **kwargs):
- response = apigateway_client.delete_resource(**kwargs)
- assert_response_is_200(response)
-
-
def create_rest_resource_method(apigateway_client, **kwargs):
response = apigateway_client.put_method(**kwargs)
assert_response_is_201(response)
return response.get("httpMethod"), response.get("authorizerId")
-def create_rest_authorizer(apigateway_client, **kwargs):
- response = apigateway_client.create_authorizer(**kwargs)
- assert_response_is_201(response)
- return response.get("id"), response.get("type")
-
-
def create_rest_api_integration(apigateway_client, **kwargs):
response = apigateway_client.put_integration(**kwargs)
assert_response_is_201(response)
return response.get("uri"), response.get("type")
-def get_rest_api_resources(apigateway_client, **kwargs):
- response = apigateway_client.get_resources(**kwargs)
- assert_response_is_200(response)
- return response.get("items")
-
-
-def delete_rest_api_integration(apigateway_client, **kwargs):
- response = apigateway_client.delete_integration(**kwargs)
- assert_response_is_200(response)
-
-
-def get_rest_api_integration(apigateway_client, **kwargs):
- response = apigateway_client.get_integration(**kwargs)
- assert_response_is_200(response)
-
-
def create_rest_api_method_response(apigateway_client, **kwargs):
response = apigateway_client.put_method_response(**kwargs)
assert_response_is_201(response)
@@ -115,17 +65,6 @@ def create_rest_api_integration_response(apigateway_client, **kwargs):
return response.get("statusCode")
-def create_domain_name(apigateway_client, **kwargs):
- response = apigateway_client.create_domain_name(**kwargs)
- assert_response_is_201(response)
-
-
-def create_base_path_mapping(apigateway_client, **kwargs):
- response = apigateway_client.create_base_path_mapping(**kwargs)
- assert_response_is_201(response)
- return response.get("basePath"), response.get("stage")
-
-
def create_rest_api_deployment(apigateway_client, **kwargs):
response = apigateway_client.create_deployment(**kwargs)
assert_response_is_201(response)
@@ -150,47 +89,6 @@ def update_rest_api_stage(apigateway_client, **kwargs):
return response.get("stageName")
-def create_cognito_user_pool(cognito_idp, **kwargs):
- response = cognito_idp.create_user_pool(**kwargs)
- assert_response_is_200(response)
- return response.get("UserPool").get("Id"), response.get("UserPool").get("Arn")
-
-
-def delete_cognito_user_pool(cognito_idp, **kwargs):
- response = cognito_idp.delete_user_pool(**kwargs)
- assert_response_is_200(response)
-
-
-def create_cognito_user_pool_client(cognito_idp, **kwargs):
- response = cognito_idp.create_user_pool_client(**kwargs)
- assert_response_is_200(response)
- return (
- response.get("UserPoolClient").get("ClientId"),
- response.get("UserPoolClient").get("ClientName"),
- )
-
-
-def create_cognito_user(cognito_idp, **kwargs):
- response = cognito_idp.sign_up(**kwargs)
- assert_response_is_200(response)
-
-
-def create_cognito_sign_up_confirmation(cognito_idp, **kwargs):
- response = cognito_idp.admin_confirm_sign_up(**kwargs)
- assert_response_is_200(response)
-
-
-def create_initiate_auth(cognito_idp, **kwargs):
- response = cognito_idp.initiate_auth(**kwargs)
- assert_response_is_200(response)
- return response.get("AuthenticationResult").get("IdToken")
-
-
-def delete_cognito_user_pool_client(cognito_idp, **kwargs):
- response = cognito_idp.delete_user_pool_client(**kwargs)
- assert_response_is_200(response)
-
-
#
# Common utilities
#
diff --git a/tests/aws/services/apigateway/conftest.py b/tests/aws/services/apigateway/conftest.py
index d593e084496d7..88ac5575de221 100644
--- a/tests/aws/services/apigateway/conftest.py
+++ b/tests/aws/services/apigateway/conftest.py
@@ -13,7 +13,6 @@
create_rest_api_stage,
create_rest_resource,
create_rest_resource_method,
- delete_rest_api,
import_rest_api,
)
from tests.aws.services.lambda_.test_lambda import TEST_LAMBDA_PYTHON_ECHO_STATUS_CODE
@@ -232,7 +231,7 @@ def _import_apigateway_function(*args, **kwargs):
yield _import_apigateway_function
for rest_api_id in rest_api_ids:
- delete_rest_api(apigateway_client, restApiId=rest_api_id)
+ apigateway_client.delete_rest_api(restApiId=rest_api_id)
@pytest.fixture
diff --git a/tests/aws/services/apigateway/test_apigateway_api.py b/tests/aws/services/apigateway/test_apigateway_api.py
index 847fca937bcc4..2ae1dc9571811 100644
--- a/tests/aws/services/apigateway/test_apigateway_api.py
+++ b/tests/aws/services/apigateway/test_apigateway_api.py
@@ -10,7 +10,6 @@
from localstack_snapshot.snapshots.transformer import KeyValueBasedTransformer, SortingTransformer
from localstack.aws.api.apigateway import PutMode
-from localstack.constants import TAG_KEY_CUSTOM_ID
from localstack.testing.aws.util import is_aws_cloud
from localstack.testing.pytest import markers
from localstack.utils.files import load_file
@@ -200,15 +199,6 @@ def test_create_rest_api_with_tags(self, apigw_create_rest_api, snapshot, aws_cl
response = aws_client.apigateway.get_rest_apis()
snapshot.match("get-rest-apis-w-tags", response)
- @markers.aws.only_localstack
- def test_create_rest_api_with_custom_id_tag(self, apigw_create_rest_api):
- custom_id_tag = "testid123"
- response = apigw_create_rest_api(
- name="my_api", description="this is my api", tags={TAG_KEY_CUSTOM_ID: custom_id_tag}
- )
- api_id = response["id"]
- assert api_id == custom_id_tag
-
@markers.aws.validated
def test_update_rest_api_operation_add_remove(
self, apigw_create_rest_api, snapshot, aws_client
@@ -2320,6 +2310,7 @@ def test_invoke_test_method(self, create_rest_apigw, snapshot, aws_client):
lambda k, v: str(v) if k == "latency" else None, "latency", replace_reference=False
)
)
+ # TODO: maybe transform the `log` field in a more fine-grained way
snapshot.add_transformer(
snapshot.transform.key_value("log", "log", reference_replacement=False)
)
diff --git a/tests/aws/services/apigateway/test_apigateway_basic.py b/tests/aws/services/apigateway/test_apigateway_basic.py
index ef984d8c99975..ec03c2b1612bb 100644
--- a/tests/aws/services/apigateway/test_apigateway_basic.py
+++ b/tests/aws/services/apigateway/test_apigateway_basic.py
@@ -54,8 +54,6 @@
create_rest_api_stage,
create_rest_resource,
create_rest_resource_method,
- delete_rest_api,
- get_rest_api,
update_rest_api_deployment,
update_rest_api_stage,
)
@@ -80,7 +78,6 @@
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_SWAGGER_FILE_JSON = os.path.join(THIS_FOLDER, "../../files/swagger.json")
TEST_SWAGGER_FILE_YAML = os.path.join(THIS_FOLDER, "../../files/swagger.yaml")
-TEST_IMPORT_REST_API_FILE = os.path.join(THIS_FOLDER, "../../files/pets.json")
TEST_IMPORT_MOCK_INTEGRATION = os.path.join(THIS_FOLDER, "../../files/openapi-mock.json")
TEST_IMPORT_REST_API_ASYNC_LAMBDA = os.path.join(THIS_FOLDER, "../../files/api_definition.yaml")
@@ -149,9 +146,8 @@ def test_create_rest_api_with_custom_id(self, create_rest_apigw, url_function, a
api_id, name, _ = create_rest_apigw(name=apigw_name, tags={TAG_KEY_CUSTOM_ID: test_id})
assert test_id == api_id
assert apigw_name == name
- api_id, name = get_rest_api(aws_client.apigateway, restApiId=test_id)
- assert test_id == api_id
- assert apigw_name == name
+ response = aws_client.apigateway.get_rest_api(restApiId=test_id)
+ assert response["name"] == apigw_name
spec_file = load_file(TEST_IMPORT_MOCK_INTEGRATION)
aws_client.apigateway.put_rest_api(restApiId=test_id, body=spec_file, mode="overwrite")
@@ -1207,6 +1203,20 @@ def invoke_api():
@markers.aws.validated
@markers.snapshot.skip_snapshot_verify(
+ paths=[
+ # the endpoint URI differs for AWS_PROXY because AWS resolves it to the Lambda HTTP endpoint while we
+ # keep the ARN
+ "$..log.line07",
+ "$..log.line10",
+ # AWS returns the AWS_PROXY invoke response headers even though they are not used at all (only the
+ # headers from the lambda payload are applied, so snapshotting them is unhelpful)
+ "$..log.line12",
+ # LocalStack does not set up headers the same way when invoking the lambda (Token, additional headers...)
+ "$..log.line08",
+ ]
+ )
+ @markers.snapshot.skip_snapshot_verify(
+ condition=lambda: not is_next_gen_api(),
paths=[
"$..headers.Content-Length",
"$..headers.Content-Type",
@@ -1216,7 +1226,7 @@ def invoke_api():
"$..multiValueHeaders.Content-Length",
"$..multiValueHeaders.Content-Type",
"$..multiValueHeaders.X-Amzn-Trace-Id",
- ]
+ ],
)
def test_apigw_test_invoke_method_api(
self,
@@ -1227,6 +1237,41 @@ def test_apigw_test_invoke_method_api(
region_name,
snapshot,
):
+ snapshot.add_transformers_list(
+ [
+ snapshot.transform.key_value(
+ "latency", value_replacement="", reference_replacement=False
+ ),
+ snapshot.transform.jsonpath(
+ "$..headers.X-Amzn-Trace-Id", value_replacement="x-amz-trace-id"
+ ),
+ snapshot.transform.regex(
+ r"URI: https:\/\/.*?\/2015-03-31", "URI: https:///2015-03-31"
+ ),
+ snapshot.transform.regex(
+ r"Integration latency: \d*? ms", "Integration latency: ms"
+ ),
+ snapshot.transform.regex(
+ r"Date=[a-zA-Z]{3},\s\d{2}\s[a-zA-Z]{3}\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT",
+ "Date=Day, dd MMM yyyy hh:mm:ss GMT",
+ ),
+ snapshot.transform.regex(
+ r"x-amzn-RequestId=[a-f0-9-]{36}", "x-amzn-RequestId="
+ ),
+ snapshot.transform.regex(
+ r"[a-zA-Z]{3}\s[a-zA-Z]{3}\s\d{2}\s\d{2}:\d{2}:\d{2}\sUTC\s\d{4} :",
+ "DDD MMM dd hh:mm:ss UTC yyyy :",
+ ),
+ snapshot.transform.regex(
+ r"Authorization=.*?,", "Authorization=,"
+ ),
+ snapshot.transform.regex(
+ r"X-Amz-Security-Token=.*?\s\[", "X-Amz-Security-Token= ["
+ ),
+ snapshot.transform.regex(r"\d{8}T\d{6}Z", ""),
+ ]
+ )
+
_, role_arn = create_role_with_policy(
"Allow", "lambda:InvokeFunction", json.dumps(APIGATEWAY_ASSUME_ROLE_POLICY), "*"
)
@@ -1238,14 +1283,17 @@ def test_apigw_test_invoke_method_api(
handler="lambda_handler.handler",
runtime=Runtime.nodejs18_x,
)
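+ # scrub the dynamically generated function name wherever it appears in the response and execution log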
+ snapshot.add_transformer(snapshot.transform.regex(function_name, ""))
lambda_arn = create_function_response["CreateFunctionResponse"]["FunctionArn"]
target_uri = arns.apigateway_invocations_arn(lambda_arn, region_name)
# create REST API and test resource
rest_api_id, _, root = create_rest_apigw(name=f"test-{short_uid()}")
- resource_id, _ = create_rest_resource(
- aws_client.apigateway, restApiId=rest_api_id, parentId=root, pathPart="foo"
+ snapshot.add_transformer(snapshot.transform.regex(rest_api_id, ""))
+ resource = aws_client.apigateway.create_resource(
+ restApiId=rest_api_id, parentId=root, pathPart="foo"
)
+ resource_id = resource["id"]
# create method and integration
aws_client.apigateway.put_method(
@@ -1263,8 +1311,7 @@ def test_apigw_test_invoke_method_api(
uri=target_uri,
credentials=role_arn,
)
- status_code = create_rest_api_method_response(
- aws_client.apigateway,
+ aws_client.apigateway.put_method_response(
restApiId=rest_api_id,
resourceId=resource_id,
httpMethod="GET",
@@ -1274,46 +1321,64 @@ def test_apigw_test_invoke_method_api(
restApiId=rest_api_id,
resourceId=resource_id,
httpMethod="GET",
- statusCode=status_code,
+ statusCode="200",
selectionPattern="",
)
- deployment_id, _ = create_rest_api_deployment(aws_client.apigateway, restApiId=rest_api_id)
- create_rest_api_stage(
- aws_client.apigateway,
- restApiId=rest_api_id,
- stageName="local",
- deploymentId=deployment_id,
- )
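+ # passing stageName to create_deployment creates the stage in the same call, replacing the separate stage helper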
+ aws_client.apigateway.create_deployment(restApiId=rest_api_id, stageName="local")
# run test_invoke_method API #1
- def test_invoke_call():
- response = aws_client.apigateway.test_invoke_method(
+ def _test_invoke_call(
+ path_with_qs: str, body: str | None = None, headers: dict | None = None
+ ):
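+ # only forward body/headers when provided, as boto3 rejects None values for these parameters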
+ kwargs = {}
+ if body:
+ kwargs["body"] = body
+ if headers:
+ kwargs["headers"] = headers
+ _response = aws_client.apigateway.test_invoke_method(
restApiId=rest_api_id,
resourceId=resource_id,
httpMethod="GET",
- pathWithQueryString="/foo",
+ pathWithQueryString=path_with_qs,
+ **kwargs,
)
- assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
- assert 200 == response.get("status")
- assert "response from" in json.loads(response.get("body")).get("body")
- snapshot.match("test_invoke_method_response", response)
+ assert _response.get("status") == 200
+ assert "response from" in json.loads(_response.get("body")).get("body")
+ return _response
- retry(test_invoke_call, retries=15, sleep=1)
+
+ invoke_simple = retry(_test_invoke_call, retries=15, sleep=1, path_with_qs="/foo")
+ def _transform_log(_log: str) -> dict[str, str]:
+ return {f"line{index:02d}": line for index, line in enumerate(_log.split("\n"))}
+
+ # we want very precise matching on the log, and splitting it on new lines helps pinpoint the failure if the
+ # snapshot does not match
+ # the snapshot library does not allow ignoring an array index as the last node, so we need to put it in a dict
+ invoke_simple["log"] = _transform_log(invoke_simple["log"])
+ request_id_1 = invoke_simple["log"]["line00"].split(" ")[-1]
+ snapshot.add_transformer(
+ snapshot.transform.regex(request_id_1, ""), priority=-1
+ )
+ snapshot.match("test_invoke_method_response", invoke_simple)
# run test_invoke_method API #2
- response = aws_client.apigateway.test_invoke_method(
- restApiId=rest_api_id,
- resourceId=resource_id,
- httpMethod="GET",
- pathWithQueryString="/foo",
+ invoke_with_parameters = retry(
+ _test_invoke_call,
+ retries=15,
+ sleep=1,
+ path_with_qs="/foo?queryTest=value",
body='{"test": "val123"}',
headers={"content-type": "application/json"},
)
- assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
- assert 200 == response.get("status")
- assert "response from" in json.loads(response.get("body")).get("body")
- assert "val123" in json.loads(response.get("body")).get("body")
- snapshot.match("test_invoke_method_response_with_body", response)
+ response_body = json.loads(invoke_with_parameters.get("body")).get("body")
+ assert "response from" in response_body
+ assert "val123" in response_body
+ invoke_with_parameters["log"] = _transform_log(invoke_with_parameters["log"])
+ request_id_2 = invoke_with_parameters["log"]["line00"].split(" ")[-1]
+ snapshot.add_transformer(
+ snapshot.transform.regex(request_id_2, ""), priority=-1
+ )
+ snapshot.match("test_invoke_method_response_with_body", invoke_with_parameters)
@markers.aws.validated
@pytest.mark.parametrize("stage_name", ["local", "dev"])
@@ -1631,9 +1696,8 @@ def _invoke_url(url):
api_us_id, stage=stage_name, path="/demo", region="us-west-1", url_type=url_type
)
retry(_invoke_url, retries=20, sleep=2, url=endpoint)
-
- delete_rest_api(apigateway_client_eu, restApiId=api_eu_id)
- delete_rest_api(apigateway_client_us, restApiId=api_us_id)
+ apigateway_client_eu.delete_rest_api(restApiId=api_eu_id)
+ apigateway_client_us.delete_rest_api(restApiId=api_us_id)
class TestIntegrations:
diff --git a/tests/aws/services/apigateway/test_apigateway_basic.snapshot.json b/tests/aws/services/apigateway/test_apigateway_basic.snapshot.json
index 51574dc79b97c..4cdbcb8e1e311 100644
--- a/tests/aws/services/apigateway/test_apigateway_basic.snapshot.json
+++ b/tests/aws/services/apigateway/test_apigateway_basic.snapshot.json
@@ -1,6 +1,6 @@
{
"tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_apigw_test_invoke_method_api": {
- "recorded-date": "04-02-2024, 18:48:24",
+ "recorded-date": "11-04-2025, 18:02:16",
"recorded-content": {
"test_invoke_method_response": {
"body": {
@@ -11,16 +11,36 @@
},
"headers": {
"Content-Type": "application/json",
- "X-Amzn-Trace-Id": "Root=1-65bfdbf7-1b5920a5a0a57e32194306b3;Parent=5c9925637b7d89fa;Sampled=0;lineage=59cc7ee1:0"
+ "X-Amzn-Trace-Id": ""
+ },
+ "latency": "",
+ "log": {
+ "line00": "Execution log for request ",
+ "line01": "DDD MMM dd hh:mm:ss UTC yyyy : Starting execution for request: ",
+ "line02": "DDD MMM dd hh:mm:ss UTC yyyy : HTTP Method: GET, Resource Path: /foo",
+ "line03": "DDD MMM dd hh:mm:ss UTC yyyy : Method request path: {}",
+ "line04": "DDD MMM dd hh:mm:ss UTC yyyy : Method request query string: {}",
+ "line05": "DDD MMM dd hh:mm:ss UTC yyyy : Method request headers: {}",
+ "line06": "DDD MMM dd hh:mm:ss UTC yyyy : Method request body before transformations: ",
+ "line07": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint request URI: https:///2015-03-31/functions/arn::lambda::111111111111:function:/invocations",
+ "line08": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint request headers: {x-amzn-lambda-integration-tag=, Authorization=, X-Amz-Date=, x-amzn-apigateway-api-id=, Accept=application/json, User-Agent=AmazonAPIGateway_, X-Amz-Security-Token= [TRUNCATED]",
+ "line09": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint request body after transformations: ",
+ "line10": "DDD MMM dd hh:mm:ss UTC yyyy : Sending request to https://lambda..amazonaws.com/2015-03-31/functions/arn::lambda::111111111111:function:/invocations",
+ "line11": "DDD MMM dd hh:mm:ss UTC yyyy : Received response. Status: 200, Integration latency: ms",
+ "line12": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint response headers: {Date=Day, dd MMM yyyy hh:mm:ss GMT, Content-Type=application/json, Content-Length=104, Connection=keep-alive, x-amzn-RequestId=, x-amzn-Remapped-Content-Length=0, X-Amz-Executed-Version=$LATEST, X-Amzn-Trace-Id=}",
+ "line13": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint response body before transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}",
+ "line14": "DDD MMM dd hh:mm:ss UTC yyyy : Method response body after transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}",
+ "line15": "DDD MMM dd hh:mm:ss UTC yyyy : Method response headers: {X-Amzn-Trace-Id=, Content-Type=application/json}",
+ "line16": "DDD MMM dd hh:mm:ss UTC yyyy : Successfully completed execution",
+ "line17": "DDD MMM dd hh:mm:ss UTC yyyy : Method completed with status: 200",
+ "line18": ""
},
- "latency": 394,
- "log": "Execution log for request d09d726b-32a3-42fc-87c7-42ac58bca845\nSun Feb 04 18:48:23 UTC 2024 : Starting execution for request: d09d726b-32a3-42fc-87c7-42ac58bca845\nSun Feb 04 18:48:23 UTC 2024 : HTTP Method: GET, Resource Path: /foo\nSun Feb 04 18:48:23 UTC 2024 : Method request path: {}\nSun Feb 04 18:48:23 UTC 2024 : Method request query string: {}\nSun Feb 04 18:48:23 UTC 2024 : Method request headers: {}\nSun Feb 04 18:48:23 UTC 2024 : Method request body before transformations: \nSun Feb 04 18:48:23 UTC 2024 : Endpoint request URI: https://lambda..amazonaws.com/2015-03-31/functions/arn::lambda::111111111111:function:test-de2a8789/invocations\nSun Feb 04 18:48:23 UTC 2024 : Endpoint request headers: {x-amzn-lambda-integration-tag=d09d726b-32a3-42fc-87c7-42ac58bca845, Authorization=*********************************************************************************************************************************************************************************************************************************************************************fd20ad, X-Amz-Date=20240204T184823Z, x-amzn-apigateway-api-id=96m844vit9, Accept=application/json, User-Agent=AmazonAPIGateway_96m844vit9, X-Amz-Security-Token=IQoJb3JpZ2luX2VjEMv//////////wEaCXVzLWVhc3QtMSJHMEUCIQDH/nm1y4gMfoEBmxGW3/Tvqy4n6O3lzViNg021ao2NOQIgXFf6aGDn2L5egYErKkRsBaOKEvTn/jpaZgmTjAGO1BEq7gIIlP//////////ARACGgw2NTk2NzY4MjExMTgiDGZzbbOVj3R7zPeswyrCAtEzQYGuVCS1ylMX93oVtpfyXNQx3ZLeknme7FtyuuFFuzM2lU+a3C4ykL4j8qQmT8nFXdfX7ZzLCLmRjr1EhTgPrh7SE5XSxfBQdxTQxkoaGImnDRbceKLPxSMALrub+owhkfeZT29laOyBzPdttLM7iG7Q/bws/ywC0I8HMJA4Dl5KHMhiKDBncYXjdYhlHCSPb+qN/5cZ1Wm+jUV/znw6RG8Hhz+mKzFDckbVItiRD+CdbP5V3IjVZgtzSvwXqN8EXN9R0tRXE+b0FD7AUMctWoDbCqkIHf [TRUNCATED]\nSun Feb 04 18:48:23 UTC 2024 : Endpoint request body after transformations: \nSun Feb 04 18:48:23 UTC 2024 : Sending request to https://lambda..amazonaws.com/2015-03-31/functions/arn::lambda::111111111111:function:test-de2a8789/invocations\nSun Feb 04 18:48:24 UTC 2024 : Received response. Status: 200, Integration latency: 356 ms\nSun Feb 04 18:48:24 UTC 2024 : Endpoint response headers: {Date=Sun, 04 Feb 2024 18:48:24 GMT, Content-Type=application/json, Content-Length=104, Connection=keep-alive, x-amzn-RequestId=20a0cc6d-ade0-417f-853d-04c72dbe23d6, x-amzn-Remapped-Content-Length=0, X-Amz-Executed-Version=$LATEST, X-Amzn-Trace-Id=root=1-65bfdbf7-1b5920a5a0a57e32194306b3;parent=5c9925637b7d89fa;sampled=0;lineage=59cc7ee1:0}\nSun Feb 04 18:48:24 UTC 2024 : Endpoint response body before transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}\nSun Feb 04 18:48:24 UTC 2024 : Method response body after transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}\nSun Feb 04 18:48:24 UTC 2024 : Method response headers: {X-Amzn-Trace-Id=Root=1-65bfdbf7-1b5920a5a0a57e32194306b3;Parent=5c9925637b7d89fa;Sampled=0;lineage=59cc7ee1:0, Content-Type=application/json}\nSun Feb 04 18:48:24 UTC 2024 : Successfully completed execution\nSun Feb 04 18:48:24 UTC 2024 : Method completed with status: 200\n",
"multiValueHeaders": {
"Content-Type": [
"application/json"
],
"X-Amzn-Trace-Id": [
- "Root=1-65bfdbf7-1b5920a5a0a57e32194306b3;Parent=5c9925637b7d89fa;Sampled=0;lineage=59cc7ee1:0"
+ ""
]
},
"status": 200,
@@ -38,16 +58,36 @@
},
"headers": {
"Content-Type": "application/json",
- "X-Amzn-Trace-Id": "Root=1-65bfdbf8-caa70673935f456b40debcda;Parent=0f5819866f6639ce;Sampled=0;lineage=59cc7ee1:0"
+ "X-Amzn-Trace-Id": ""
+ },
+ "latency": "",
+ "log": {
+ "line00": "Execution log for request ",
+ "line01": "DDD MMM dd hh:mm:ss UTC yyyy : Starting execution for request: ",
+ "line02": "DDD MMM dd hh:mm:ss UTC yyyy : HTTP Method: GET, Resource Path: /foo",
+ "line03": "DDD MMM dd hh:mm:ss UTC yyyy : Method request path: {}",
+ "line04": "DDD MMM dd hh:mm:ss UTC yyyy : Method request query string: {queryTest=value}",
+ "line05": "DDD MMM dd hh:mm:ss UTC yyyy : Method request headers: {content-type=application/json}",
+ "line06": "DDD MMM dd hh:mm:ss UTC yyyy : Method request body before transformations: {\"test\": \"val123\"}",
+ "line07": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint request URI: https:///2015-03-31/functions/arn::lambda::111111111111:function:/invocations",
+ "line08": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint request headers: {x-amzn-lambda-integration-tag=, Authorization=, X-Amz-Date=, x-amzn-apigateway-api-id=, Accept=application/json, User-Agent=AmazonAPIGateway_, X-Amz-Security-Token= [TRUNCATED]",
+ "line09": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint request body after transformations: {\"test\": \"val123\"}",
+ "line10": "DDD MMM dd hh:mm:ss UTC yyyy : Sending request to https://lambda..amazonaws.com/2015-03-31/functions/arn::lambda::111111111111:function:/invocations",
+ "line11": "DDD MMM dd hh:mm:ss UTC yyyy : Received response. Status: 200, Integration latency: ms",
+ "line12": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint response headers: {Date=Day, dd MMM yyyy hh:mm:ss GMT, Content-Type=application/json, Content-Length=131, Connection=keep-alive, x-amzn-RequestId=, x-amzn-Remapped-Content-Length=0, X-Amz-Executed-Version=$LATEST, X-Amzn-Trace-Id=}",
+ "line13": "DDD MMM dd hh:mm:ss UTC yyyy : Endpoint response body before transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {\\\\\\\"test\\\\\\\":\\\\\\\"val123\\\\\\\"}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}",
+ "line14": "DDD MMM dd hh:mm:ss UTC yyyy : Method response body after transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {\\\\\\\"test\\\\\\\":\\\\\\\"val123\\\\\\\"}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}",
+ "line15": "DDD MMM dd hh:mm:ss UTC yyyy : Method response headers: {X-Amzn-Trace-Id=, Content-Type=application/json}",
+ "line16": "DDD MMM dd hh:mm:ss UTC yyyy : Successfully completed execution",
+ "line17": "DDD MMM dd hh:mm:ss UTC yyyy : Method completed with status: 200",
+ "line18": ""
},
- "latency": 62,
- "log": "Execution log for request 63ecf43a-1b6e-40ef-80b7-98c5b7484ec9\nSun Feb 04 18:48:24 UTC 2024 : Starting execution for request: 63ecf43a-1b6e-40ef-80b7-98c5b7484ec9\nSun Feb 04 18:48:24 UTC 2024 : HTTP Method: GET, Resource Path: /foo\nSun Feb 04 18:48:24 UTC 2024 : Method request path: {}\nSun Feb 04 18:48:24 UTC 2024 : Method request query string: {}\nSun Feb 04 18:48:24 UTC 2024 : Method request headers: {content-type=application/json}\nSun Feb 04 18:48:24 UTC 2024 : Method request body before transformations: {\"test\": \"val123\"}\nSun Feb 04 18:48:24 UTC 2024 : Endpoint request URI: https://lambda..amazonaws.com/2015-03-31/functions/arn::lambda::111111111111:function:test-de2a8789/invocations\nSun Feb 04 18:48:24 UTC 2024 : Endpoint request headers: {x-amzn-lambda-integration-tag=63ecf43a-1b6e-40ef-80b7-98c5b7484ec9, Authorization=*******************************************************************************************************************************************************************************************************************************************************************************************************4b5ad4, X-Amz-Date=20240204T184824Z, x-amzn-apigateway-api-id=96m844vit9, Accept=application/json, User-Agent=AmazonAPIGateway_96m844vit9, X-Amz-Security-Token=IQoJb3JpZ2luX2VjEMv//////////wEaCXVzLWVhc3QtMSJIMEYCIQCX8aMq+Q5P6zw4SzP7nSzzMTzd2D0tbCwx9jyQnWiiSgIhAKevG8f4Qo1O/lr+A17AujqFg9AqJCIB5zNu+g8RZFl+Ku4CCJT//////////wEQAhoMNjU5Njc2ODIxMTE4IgxyHR1NVV6IvXrBrD8qwgJNyGLqGkyhoWFD36VE4ENpEW9PzKtbnKkQq/tqZdBBSwvzTmANSNEE7dIpiTolgXGMN4llNaV9CNYF+Ro/zXmsY4u/y8HgSFnTst/iOam+hEGQEr9BEflhu1Sqy7xqBt5pfIVscdpPNVsdX0OLKDT98v3pTRUnilsMDK/6F4wzl4SJ8mQ4vYqCN5mh6n+96Ze2Q0ldYEDjbBmMItgyDk2so2OxMiVPtrhJ81u7NYsEYdmgQ5dve3rQYT7+oVnA [TRUNCATED]\nSun Feb 04 18:48:24 UTC 2024 : Endpoint request body after transformations: {\"test\": \"val123\"}\nSun Feb 04 18:48:24 UTC 2024 : Sending request to https://lambda..amazonaws.com/2015-03-31/functions/arn::lambda::111111111111:function:test-de2a8789/invocations\nSun Feb 04 18:48:24 UTC 2024 : Received response. Status: 200, Integration latency: 25 ms\nSun Feb 04 18:48:24 UTC 2024 : Endpoint response headers: {Date=Sun, 04 Feb 2024 18:48:24 GMT, Content-Type=application/json, Content-Length=131, Connection=keep-alive, x-amzn-RequestId=57dc53e3-bc2e-449b-83ef-fd7d97479909, x-amzn-Remapped-Content-Length=0, X-Amz-Executed-Version=$LATEST, X-Amzn-Trace-Id=root=1-65bfdbf8-caa70673935f456b40debcda;parent=0f5819866f6639ce;sampled=0;lineage=59cc7ee1:0}\nSun Feb 04 18:48:24 UTC 2024 : Endpoint response body before transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {\\\\\\\"test\\\\\\\":\\\\\\\"val123\\\\\\\"}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}\nSun Feb 04 18:48:24 UTC 2024 : Method response body after transformations: {\"statusCode\":200,\"body\":\"\\\"response from localstack lambda: {\\\\\\\"test\\\\\\\":\\\\\\\"val123\\\\\\\"}\\\"\",\"isBase64Encoded\":false,\"headers\":{}}\nSun Feb 04 18:48:24 UTC 2024 : Method response headers: {X-Amzn-Trace-Id=Root=1-65bfdbf8-caa70673935f456b40debcda;Parent=0f5819866f6639ce;Sampled=0;lineage=59cc7ee1:0, Content-Type=application/json}\nSun Feb 04 18:48:24 UTC 2024 : Successfully completed execution\nSun Feb 04 18:48:24 UTC 2024 : Method completed with status: 200\n",
"multiValueHeaders": {
"Content-Type": [
"application/json"
],
"X-Amzn-Trace-Id": [
- "Root=1-65bfdbf8-caa70673935f456b40debcda;Parent=0f5819866f6639ce;Sampled=0;lineage=59cc7ee1:0"
+ ""
]
},
"status": 200,
diff --git a/tests/aws/services/apigateway/test_apigateway_basic.validation.json b/tests/aws/services/apigateway/test_apigateway_basic.validation.json
index cbb19a133ecf2..43de03144651a 100644
--- a/tests/aws/services/apigateway/test_apigateway_basic.validation.json
+++ b/tests/aws/services/apigateway/test_apigateway_basic.validation.json
@@ -15,7 +15,7 @@
"last_validated_date": "2024-07-12T20:04:15+00:00"
},
"tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_apigw_test_invoke_method_api": {
- "last_validated_date": "2024-02-04T18:48:24+00:00"
+ "last_validated_date": "2025-04-11T18:03:13+00:00"
},
"tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_update_rest_api_deployment": {
"last_validated_date": "2024-04-12T21:24:49+00:00"
diff --git a/tests/aws/services/apigateway/test_apigateway_common.py b/tests/aws/services/apigateway/test_apigateway_common.py
index b0477593c8241..c585df9dcb05d 100644
--- a/tests/aws/services/apigateway/test_apigateway_common.py
+++ b/tests/aws/services/apigateway/test_apigateway_common.py
@@ -8,6 +8,7 @@
from botocore.exceptions import ClientError
from localstack.aws.api.lambda_ import Runtime
+from localstack.constants import TAG_KEY_CUSTOM_ID
from localstack.testing.aws.util import is_aws_cloud
from localstack.testing.pytest import markers
from localstack.utils.aws.arns import get_partition, parse_arn
@@ -1787,3 +1788,34 @@ def test_api_not_existing(self, aws_client, create_rest_apigw, snapshot):
assert _response.json() == {
"message": "The API id '404api' does not correspond to a deployed API Gateway API"
}
+
+ @markers.aws.only_localstack
+ def test_routing_with_custom_api_id(self, aws_client, create_rest_apigw):
+ custom_id = "custom-api-id"
+ api_id, _, root_id = create_rest_apigw(
+ name="test custom id routing", tags={TAG_KEY_CUSTOM_ID: custom_id}
+ )
+
+ resource = aws_client.apigateway.create_resource(
+ restApiId=api_id, parentId=root_id, pathPart="part1"
+ )
+ hardcoded_resource_id = resource["id"]
+
+ response_template_get = {"statusCode": 200, "message": "routing ok"}
+ _create_mock_integration_with_200_response_template(
+ aws_client, api_id, hardcoded_resource_id, "GET", response_template_get
+ )
+
+ stage_name = "dev"
+ aws_client.apigateway.create_deployment(restApiId=api_id, stageName=stage_name)
+
+ url = api_invoke_url(api_id=api_id, stage=stage_name, path="/part1")
+ response = requests.get(url)
+ assert response.ok
+ assert response.json()["message"] == "routing ok"
+
+ # Validated test living here: `test_create_execute_api_vpc_endpoint`
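+ # routing must also work with the "-vpce-<id>" suffix that execute-api VPC endpoint hostnames add to the API id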
+ vpce_url = url.replace(custom_id, f"{custom_id}-vpce-aabbaabbaabbaabba")
+ response = requests.get(vpce_url)
+ assert response.ok
+ assert response.json()["message"] == "routing ok"
diff --git a/tests/aws/services/apigateway/test_apigateway_extended.py b/tests/aws/services/apigateway/test_apigateway_extended.py
index 54a253fc8febe..c95965db241c1 100644
--- a/tests/aws/services/apigateway/test_apigateway_extended.py
+++ b/tests/aws/services/apigateway/test_apigateway_extended.py
@@ -43,7 +43,13 @@ def _create(**kwargs):
[TEST_IMPORT_PETSTORE_SWAGGER, TEST_IMPORT_PETS],
ids=["TEST_IMPORT_PETSTORE_SWAGGER", "TEST_IMPORT_PETS"],
)
-@markers.snapshot.skip_snapshot_verify(paths=["$..body.host"])
+@markers.snapshot.skip_snapshot_verify(
+ paths=[
+ "$..body.host",
+ # TODO: not returned by LS
+ "$..endpointConfiguration.ipAddressType",
+ ]
+)
def test_export_swagger_openapi(aws_client, snapshot, import_apigw, import_file, region_name):
snapshot.add_transformer(
[
@@ -82,7 +88,13 @@ def test_export_swagger_openapi(aws_client, snapshot, import_apigw, import_file,
[TEST_IMPORT_PETSTORE_SWAGGER, TEST_IMPORT_PETS],
ids=["TEST_IMPORT_PETSTORE_SWAGGER", "TEST_IMPORT_PETS"],
)
-@markers.snapshot.skip_snapshot_verify(paths=["$..body.servers..url"])
+@markers.snapshot.skip_snapshot_verify(
+ paths=[
+ "$..body.servers..url",
+ # TODO: not returned by LS
+ "$..endpointConfiguration.ipAddressType",
+ ]
+)
def test_export_oas30_openapi(aws_client, snapshot, import_apigw, region_name, import_file):
snapshot.add_transformer(
[
diff --git a/tests/aws/services/apigateway/test_apigateway_extended.snapshot.json b/tests/aws/services/apigateway/test_apigateway_extended.snapshot.json
index efdbdcbccf8f0..76db5eff4a01b 100644
--- a/tests/aws/services/apigateway/test_apigateway_extended.snapshot.json
+++ b/tests/aws/services/apigateway/test_apigateway_extended.snapshot.json
@@ -1,6 +1,6 @@
{
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_swagger_openapi[TEST_IMPORT_PETSTORE_SWAGGER]": {
- "recorded-date": "15-04-2024, 21:43:25",
+ "recorded-date": "06-05-2025, 18:20:26",
"recorded-content": {
"import-api": {
"apiKeySource": "HEADER",
@@ -8,6 +8,7 @@
"description": "Your first API with Amazon API Gateway. This is a sample API that integrates via HTTP with our demo Pet Store endpoints",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -638,13 +639,18 @@
}
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_swagger_openapi[TEST_IMPORT_PETS]": {
- "recorded-date": "15-04-2024, 21:43:56",
+ "recorded-date": "06-05-2025, 18:21:08",
"recorded-content": {
"import-api": {
"apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/png",
+ "image/jpg"
+ ],
"createdDate": "datetime",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -727,6 +733,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "http",
"httpMethod": "GET",
"uri": "http://petstore-demo-endpoint.execute-api.com/petstore/pets",
"responses": {
@@ -734,8 +741,7 @@
"statusCode": "200"
}
},
- "passthroughBehavior": "when_no_match",
- "type": "http"
+ "passthroughBehavior": "when_no_match"
}
}
},
@@ -755,6 +761,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "http",
"httpMethod": "GET",
"uri": "http://petstore-demo-endpoint.execute-api.com/petstore/pets/{id}",
"responses": {
@@ -765,12 +772,15 @@
"requestParameters": {
"integration.request.path.id": "method.request.path.petId"
},
- "passthroughBehavior": "when_no_match",
- "type": "http"
+ "passthroughBehavior": "when_no_match"
}
}
}
- }
+ },
+ "x-amazon-apigateway-binary-media-types": [
+ "image/png",
+ "image/jpg"
+ ]
},
"contentDisposition": "attachment; filename=\"swagger_1.0.0.json\"",
"contentType": "application/octet-stream",
@@ -782,7 +792,7 @@
}
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_oas30_openapi[TEST_IMPORT_PETSTORE_SWAGGER]": {
- "recorded-date": "15-04-2024, 21:45:03",
+ "recorded-date": "06-05-2025, 18:34:11",
"recorded-content": {
"import-api": {
"apiKeySource": "HEADER",
@@ -790,6 +800,7 @@
"description": "Your first API with Amazon API Gateway. This is a sample API that integrates via HTTP with our demo Pet Store endpoints",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -1140,6 +1151,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "http",
"httpMethod": "GET",
"uri": "http://petstore.execute-api..amazonaws.com/petstore/pets",
"responses": {
@@ -1154,8 +1166,7 @@
"integration.request.querystring.page": "method.request.querystring.page",
"integration.request.querystring.type": "method.request.querystring.type"
},
- "passthroughBehavior": "when_no_match",
- "type": "http"
+ "passthroughBehavior": "when_no_match"
}
},
"post": {
@@ -1190,6 +1201,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "http",
"httpMethod": "POST",
"uri": "http://petstore.execute-api..amazonaws.com/petstore/pets",
"responses": {
@@ -1200,8 +1212,7 @@
}
}
},
- "passthroughBehavior": "when_no_match",
- "type": "http"
+ "passthroughBehavior": "when_no_match"
}
},
"options": {
@@ -1235,6 +1246,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "mock",
"responses": {
"default": {
"statusCode": "200",
@@ -1248,8 +1260,7 @@
"requestTemplates": {
"application/json": "{\"statusCode\": 200}"
},
- "passthroughBehavior": "when_no_match",
- "type": "mock"
+ "passthroughBehavior": "when_no_match"
}
}
},
@@ -1286,6 +1297,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "http",
"httpMethod": "GET",
"uri": "http://petstore.execute-api..amazonaws.com/petstore/pets/{petId}",
"responses": {
@@ -1299,8 +1311,7 @@
"requestParameters": {
"integration.request.path.petId": "method.request.path.petId"
},
- "passthroughBehavior": "when_no_match",
- "type": "http"
+ "passthroughBehavior": "when_no_match"
}
},
"options": {
@@ -1344,6 +1355,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "mock",
"responses": {
"default": {
"statusCode": "200",
@@ -1357,8 +1369,7 @@
"requestTemplates": {
"application/json": "{\"statusCode\": 200}"
},
- "passthroughBehavior": "when_no_match",
- "type": "mock"
+ "passthroughBehavior": "when_no_match"
}
}
},
@@ -1378,6 +1389,7 @@
}
},
"x-amazon-apigateway-integration": {
+ "type": "mock",
"responses": {
"default": {
"statusCode": "200",
@@ -1392,8 +1404,7 @@
"requestTemplates": {
"application/json": "{\"statusCode\": 200}"
},
- "passthroughBehavior": "when_no_match",
- "type": "mock"
+ "passthroughBehavior": "when_no_match"
}
}
}
@@ -1468,13 +1479,18 @@
}
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_oas30_openapi[TEST_IMPORT_PETS]": {
- "recorded-date": "15-04-2024, 21:45:07",
+ "recorded-date": "06-05-2025, 18:34:49",
"recorded-content": {
"import-api": {
"apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/png",
+ "image/jpg"
+ ],
"createdDate": "datetime",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -1620,7 +1636,11 @@
}
}
},
- "components": {}
+ "components": {},
+ "x-amazon-apigateway-binary-media-types": [
+ "image/png",
+ "image/jpg"
+ ]
},
"contentDisposition": "attachment; filename=\"oas30_1.0.0.json\"",
"contentType": "application/octet-stream",
diff --git a/tests/aws/services/apigateway/test_apigateway_extended.validation.json b/tests/aws/services/apigateway/test_apigateway_extended.validation.json
index f4b5c141dd2c2..1486731f72d07 100644
--- a/tests/aws/services/apigateway/test_apigateway_extended.validation.json
+++ b/tests/aws/services/apigateway/test_apigateway_extended.validation.json
@@ -6,15 +6,15 @@
"last_validated_date": "2024-10-10T18:54:41+00:00"
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_oas30_openapi[TEST_IMPORT_PETSTORE_SWAGGER]": {
- "last_validated_date": "2024-04-15T21:45:02+00:00"
+ "last_validated_date": "2025-05-06T18:34:11+00:00"
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_oas30_openapi[TEST_IMPORT_PETS]": {
- "last_validated_date": "2024-04-15T21:45:04+00:00"
+ "last_validated_date": "2025-05-06T18:34:17+00:00"
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_swagger_openapi[TEST_IMPORT_PETSTORE_SWAGGER]": {
- "last_validated_date": "2024-04-15T21:43:24+00:00"
+ "last_validated_date": "2025-05-06T18:20:25+00:00"
},
"tests/aws/services/apigateway/test_apigateway_extended.py::test_export_swagger_openapi[TEST_IMPORT_PETS]": {
- "last_validated_date": "2024-04-15T21:43:30+00:00"
+ "last_validated_date": "2025-05-06T18:20:36+00:00"
}
}
diff --git a/tests/aws/services/apigateway/test_apigateway_import.py b/tests/aws/services/apigateway/test_apigateway_import.py
index 30b437f5f8799..47599ae5ae4e4 100644
--- a/tests/aws/services/apigateway/test_apigateway_import.py
+++ b/tests/aws/services/apigateway/test_apigateway_import.py
@@ -389,12 +389,13 @@ def test_import_and_validate_rest_api(
"$.get-resources-swagger-json.items..resourceMethods.OPTIONS",
"$.get-resources-no-base-path-swagger.items..resourceMethods.GET",
"$.get-resources-no-base-path-swagger.items..resourceMethods.OPTIONS",
+ # TODO: not returned by LS
+ "$..endpointConfiguration.ipAddressType",
]
)
def test_import_rest_apis_with_base_path_swagger(
self,
base_path_type,
- create_rest_apigw,
apigw_create_rest_api,
import_apigw,
aws_client,
@@ -925,3 +926,41 @@ def test_import_with_integer_http_status_code(
# this fixture will iterate over every resource and match its method, methodResponse, integration and
# integrationResponse
apigw_snapshot_imported_resources(rest_api_id=rest_api_id, resources=response)
+
+ @markers.aws.validated
+ @pytest.mark.parametrize(
+ "put_mode",
+ ["merge", "overwrite"],
+ )
+ @markers.snapshot.skip_snapshot_verify(
+ paths=[
+ # not yet implemented
+ "$..endpointConfiguration.ipAddressType",
+ # these differ because we create a new API internally, so names and resources are recreated
+ "$..name",
+ "$..rootResourceId",
+ # not returned by LocalStack even when empty
+ "$.get-rest-api.tags",
+ ]
+ )
+ def test_put_rest_api_mode_binary_media_types(
+ self, aws_client, apigw_create_rest_api, snapshot, put_mode
+ ):
+ base_api = apigw_create_rest_api(binaryMediaTypes=["image/heif"])
+ rest_api_id = base_api["id"]
+ snapshot.match("create-rest-api", base_api)
+
+ get_api = aws_client.apigateway.get_rest_api(restApiId=rest_api_id)
+ snapshot.match("get-rest-api", get_api)
+
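+ # the snapshots show that "merge" appends the imported binary media types to the existing ones, while
+ # "overwrite" replaces them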
+ spec_file = load_file(TEST_IMPORT_REST_API_FILE)
+ put_api = aws_client.apigateway.put_rest_api(
+ restApiId=rest_api_id,
+ body=spec_file,
+ mode=put_mode,
+ )
+ snapshot.match("put-api", put_api)
+
+ if is_aws_cloud():
+ # waiting before cleaning up to avoid TooManyRequests, as we create multiple REST APIs
+ time.sleep(15)
diff --git a/tests/aws/services/apigateway/test_apigateway_import.snapshot.json b/tests/aws/services/apigateway/test_apigateway_import.snapshot.json
index 3a19bd674145f..649fc5bed285b 100644
--- a/tests/aws/services/apigateway/test_apigateway_import.snapshot.json
+++ b/tests/aws/services/apigateway/test_apigateway_import.snapshot.json
@@ -1382,13 +1382,14 @@
}
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[ignore]": {
- "recorded-date": "15-04-2024, 21:33:04",
+ "recorded-date": "06-05-2025, 18:24:25",
"recorded-content": {
"put-rest-api-swagger-json": {
"apiKeySource": "HEADER",
"createdDate": "datetime",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -1765,13 +1766,14 @@
}
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[prepend]": {
- "recorded-date": "15-04-2024, 21:34:01",
+ "recorded-date": "06-05-2025, 18:25:39",
"recorded-content": {
"put-rest-api-swagger-json": {
"apiKeySource": "HEADER",
"createdDate": "datetime",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -2154,13 +2156,14 @@
}
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[split]": {
- "recorded-date": "15-04-2024, 21:34:50",
+ "recorded-date": "06-05-2025, 18:26:25",
"recorded-content": {
"put-rest-api-swagger-json": {
"apiKeySource": "HEADER",
"createdDate": "datetime",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"EDGE"
]
@@ -5309,5 +5312,150 @@
}
}
}
+ },
+ "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_put_rest_api_mode_binary_media_types[merge]": {
+ "recorded-date": "06-05-2025, 18:14:29",
+ "recorded-content": {
+ "create-rest-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/heif"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 201
+ }
+ },
+ "get-rest-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/heif"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "tags": {},
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "put-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/heif",
+ "image/png",
+ "image/jpg"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "tags": {},
+ "version": "1.0.0",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ }
+ }
+ },
+ "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_put_rest_api_mode_binary_media_types[overwrite]": {
+ "recorded-date": "06-05-2025, 18:15:09",
+ "recorded-content": {
+ "create-rest-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/heif"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 201
+ }
+ },
+ "get-rest-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/heif"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "tags": {},
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "put-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/png",
+ "image/jpg"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "tags": {},
+ "version": "1.0.0",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ }
+ }
}
}
diff --git a/tests/aws/services/apigateway/test_apigateway_import.validation.json b/tests/aws/services/apigateway/test_apigateway_import.validation.json
index f92baec36081c..63670ed857343 100644
--- a/tests/aws/services/apigateway/test_apigateway_import.validation.json
+++ b/tests/aws/services/apigateway/test_apigateway_import.validation.json
@@ -18,13 +18,13 @@
"last_validated_date": "2024-12-12T22:45:20+00:00"
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[ignore]": {
- "last_validated_date": "2024-04-15T21:32:25+00:00"
+ "last_validated_date": "2025-05-06T18:23:50+00:00"
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[prepend]": {
- "last_validated_date": "2024-04-15T21:33:49+00:00"
+ "last_validated_date": "2025-05-06T18:25:10+00:00"
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[split]": {
- "last_validated_date": "2024-04-15T21:34:46+00:00"
+ "last_validated_date": "2025-05-06T18:26:24+00:00"
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_swagger_api": {
"last_validated_date": "2024-04-15T21:30:39+00:00"
@@ -49,5 +49,11 @@
},
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_stage_variables": {
"last_validated_date": "2024-08-12T13:42:13+00:00"
+ },
+ "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_put_rest_api_mode_binary_media_types[merge]": {
+ "last_validated_date": "2025-05-06T18:14:28+00:00"
+ },
+ "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_put_rest_api_mode_binary_media_types[overwrite]": {
+ "last_validated_date": "2025-05-06T18:14:45+00:00"
}
}
diff --git a/tests/aws/services/apigateway/test_apigateway_lambda.py b/tests/aws/services/apigateway/test_apigateway_lambda.py
index b2b26e680b6cf..8aa53aaca9890 100644
--- a/tests/aws/services/apigateway/test_apigateway_lambda.py
+++ b/tests/aws/services/apigateway/test_apigateway_lambda.py
@@ -832,6 +832,7 @@ def test_lambda_selection_patterns(
resourceId=resource_id,
httpMethod="GET",
statusCode="200",
+ selectionPattern="",
)
# 4xx
aws_client.apigateway.put_integration_response(
@@ -839,15 +840,27 @@ def test_lambda_selection_patterns(
resourceId=resource_id,
httpMethod="GET",
statusCode="405",
- selectionPattern=".*400.*",
+ selectionPattern=".*four hundred.*",
)
+
# 5xx
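+ # ".+" matches any non-empty errorMessage, so all remaining errors fall through to 502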
aws_client.apigateway.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod="GET",
statusCode="502",
- selectionPattern=".*5\\d\\d.*",
+ selectionPattern=".+",
+ )
+
+ # assert that this does not get matched even though it's the status code returned by the Lambda, showing that
+ # AWS does not match on the status code for this specific integration
+ # https://docs.aws.amazon.com/apigateway/latest/api/API_IntegrationResponse.html
+ aws_client.apigateway.put_integration_response(
+ restApiId=api_id,
+ resourceId=resource_id,
+ httpMethod="GET",
+ statusCode="504",
+ selectionPattern="200",
)
aws_client.apigateway.create_deployment(restApiId=api_id, stageName="dev")
diff --git a/tests/aws/services/apigateway/test_apigateway_lambda.snapshot.json b/tests/aws/services/apigateway/test_apigateway_lambda.snapshot.json
index f91bd1cb104c2..6cdf03ea63e3f 100644
--- a/tests/aws/services/apigateway/test_apigateway_lambda.snapshot.json
+++ b/tests/aws/services/apigateway/test_apigateway_lambda.snapshot.json
@@ -1473,23 +1473,23 @@
}
},
"tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_selection_patterns": {
- "recorded-date": "05-09-2023, 21:54:21",
+ "recorded-date": "05-05-2025, 14:10:11",
"recorded-content": {
"lambda-selection-pattern-200": "Pass",
"lambda-selection-pattern-400": {
- "errorMessage": "Error: Raising 400 from within the Lambda function",
+ "errorMessage": "Error: Raising four hundred from within the Lambda function",
"errorType": "Exception",
"requestId": "",
"stackTrace": [
- " File \"/var/task/lambda_select_pattern.py\", line 7, in handler\n raise Exception(\"Error: Raising 400 from within the Lambda function\")\n"
+ " File \"/var/task/lambda_select_pattern.py\", line 7, in handler\n raise Exception(\"Error: Raising four hundred from within the Lambda function\")\n"
]
},
"lambda-selection-pattern-500": {
- "errorMessage": "Error: Raising 500 from within the Lambda function",
+ "errorMessage": "Error: Raising five hundred from within the Lambda function",
"errorType": "Exception",
"requestId": "",
"stackTrace": [
- " File \"/var/task/lambda_select_pattern.py\", line 9, in handler\n raise Exception(\"Error: Raising 500 from within the Lambda function\")\n"
+ " File \"/var/task/lambda_select_pattern.py\", line 9, in handler\n raise Exception(\"Error: Raising five hundred from within the Lambda function\")\n"
]
}
}
diff --git a/tests/aws/services/apigateway/test_apigateway_lambda.validation.json b/tests/aws/services/apigateway/test_apigateway_lambda.validation.json
index 8dcc1e29b6fc8..c2a311dd64e4e 100644
--- a/tests/aws/services/apigateway/test_apigateway_lambda.validation.json
+++ b/tests/aws/services/apigateway/test_apigateway_lambda.validation.json
@@ -30,7 +30,7 @@
"last_validated_date": "2024-05-31T19:17:51+00:00"
},
"tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_selection_patterns": {
- "last_validated_date": "2023-09-05T19:54:21+00:00"
+ "last_validated_date": "2025-05-05T14:10:11+00:00"
},
"tests/aws/services/apigateway/test_apigateway_lambda.py::test_put_integration_aws_proxy_uri": {
"last_validated_date": "2025-03-03T12:58:39+00:00"
diff --git a/tests/aws/services/cloudformation/api/test_changesets.py b/tests/aws/services/cloudformation/api/test_changesets.py
index e7d9a793d7704..1f397310f5d21 100644
--- a/tests/aws/services/cloudformation/api/test_changesets.py
+++ b/tests/aws/services/cloudformation/api/test_changesets.py
@@ -1,8 +1,12 @@
+import copy
+import json
import os.path
import pytest
from botocore.exceptions import ClientError
+from localstack.aws.connect import ServiceLevelClientFactory
+from localstack.services.cloudformation.v2.utils import is_v2_engine
from localstack.testing.aws.cloudformation_utils import (
load_template_file,
load_template_raw,
@@ -17,6 +21,139 @@
)
+class TestUpdates:
+ @markers.aws.validated
+ def test_simple_update_single_resource(
+ self, aws_client: ServiceLevelClientFactory, deploy_cfn_template
+ ):
+ value1 = "foo"
+ value2 = "bar"
+ stack_name = f"stack-{short_uid()}"
+
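+ # a single SSM parameter; its generated name is exposed through the stack outputs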
+ t1 = {
+ "Resources": {
+ "MyParameter": {
+ "Type": "AWS::SSM::Parameter",
+ "Properties": {
+ "Type": "String",
+ "Value": value1,
+ },
+ },
+ },
+ "Outputs": {
+ "ParameterName": {
+ "Value": {"Ref": "MyParameter"},
+ },
+ },
+ }
+
+ res = deploy_cfn_template(stack_name=stack_name, template=json.dumps(t1), is_update=False)
+ parameter_name = res.outputs["ParameterName"]
+
+ found_value = aws_client.ssm.get_parameter(Name=parameter_name)["Parameter"]["Value"]
+ assert found_value == value1
+
+ t2 = copy.deepcopy(t1)
+ t2["Resources"]["MyParameter"]["Properties"]["Value"] = value2
+
+ deploy_cfn_template(stack_name=stack_name, template=json.dumps(t2), is_update=True)
+ found_value = aws_client.ssm.get_parameter(Name=parameter_name)["Parameter"]["Value"]
+ assert found_value == value2
+
+ res.destroy()
+
+ @pytest.mark.skipif(
+ condition=not is_v2_engine() and not is_aws_cloud(), reason="Test fails with the old engine"
+ )
+ @markers.aws.validated
+ def test_simple_update_two_resources(
+ self, aws_client: ServiceLevelClientFactory, deploy_cfn_template
+ ):
+ parameter_name = "my-parameter"
+ value1 = "foo"
+ value2 = "bar"
+ stack_name = f"stack-{short_uid()}"
+
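+ # MyParameter2 takes its value from MyParameter1 via Fn::GetAtt, so updating value1 must propagate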
+ t1 = {
+ "Resources": {
+ "MyParameter1": {
+ "Type": "AWS::SSM::Parameter",
+ "Properties": {
+ "Type": "String",
+ "Value": value1,
+ },
+ },
+ "MyParameter2": {
+ "Type": "AWS::SSM::Parameter",
+ "Properties": {
+ "Name": parameter_name,
+ "Type": "String",
+ "Value": {"Fn::GetAtt": ["MyParameter1", "Value"]},
+ },
+ },
+ },
+ }
+
+ res = deploy_cfn_template(stack_name=stack_name, template=json.dumps(t1), is_update=False)
+ found_value = aws_client.ssm.get_parameter(Name=parameter_name)["Parameter"]["Value"]
+ assert found_value == value1
+
+ t2 = copy.deepcopy(t1)
+ t2["Resources"]["MyParameter1"]["Properties"]["Value"] = value2
+
+ deploy_cfn_template(stack_name=stack_name, template=json.dumps(t2), is_update=True)
+ found_value = aws_client.ssm.get_parameter(Name=parameter_name)["Parameter"]["Value"]
+ assert found_value == value2
+
+ res.destroy()
+
+ @markers.aws.validated
+ # TODO: the error response is incorrect; however, the test is otherwise validated and raises
+ # an error because the SSM parameter has been deleted (removed from the stack).
+ @markers.snapshot.skip_snapshot_verify(paths=["$..Error.Message", "$..message"])
+ @pytest.mark.skipif(
+ condition=not is_v2_engine() and not is_aws_cloud(), reason="Test fails with the old engine"
+ )
+ def test_deleting_resource(
+ self, aws_client: ServiceLevelClientFactory, deploy_cfn_template, snapshot
+ ):
+ parameter_name = "my-parameter"
+ value1 = "foo"
+
+ t1 = {
+ "Resources": {
+ "MyParameter1": {
+ "Type": "AWS::SSM::Parameter",
+ "Properties": {
+ "Type": "String",
+ "Value": value1,
+ },
+ },
+ "MyParameter2": {
+ "Type": "AWS::SSM::Parameter",
+ "Properties": {
+ "Name": parameter_name,
+ "Type": "String",
+ "Value": {"Fn::GetAtt": ["MyParameter1", "Value"]},
+ },
+ },
+ },
+ }
+
+ stack = deploy_cfn_template(template=json.dumps(t1))
+ found_value = aws_client.ssm.get_parameter(Name=parameter_name)["Parameter"]["Value"]
+ assert found_value == value1
+
+ t2 = copy.deepcopy(t1)
+ del t2["Resources"]["MyParameter2"]
+
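+ # redeploying without MyParameter2 should delete the underlying SSM parameter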
+ deploy_cfn_template(stack_name=stack.stack_name, template=json.dumps(t2), is_update=True)
+ with pytest.raises(ClientError) as exc_info:
+ aws_client.ssm.get_parameter(Name=parameter_name)
+
+ snapshot.match("get-parameter-error", exc_info.value.response)
+
+
@markers.aws.validated
def test_create_change_set_without_parameters(
cleanup_stacks, cleanup_changesets, is_change_set_created_and_available, aws_client
diff --git a/tests/aws/services/cloudformation/api/test_changesets.validation.json b/tests/aws/services/cloudformation/api/test_changesets.validation.json
index 0e46e94326bff..3c3b7ffa3c6c3 100644
--- a/tests/aws/services/cloudformation/api/test_changesets.validation.json
+++ b/tests/aws/services/cloudformation/api/test_changesets.validation.json
@@ -1,4 +1,52 @@
{
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_dynamic]": {
+ "last_validated_date": "2025-04-03T07:11:44+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_parameter_for_condition_create_resource]": {
+ "last_validated_date": "2025-04-03T07:13:00+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_unrelated_property]": {
+ "last_validated_date": "2025-04-03T07:12:11+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_unrelated_property_not_create_only]": {
+ "last_validated_date": "2025-04-03T07:12:37+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_base_mapping_scenarios[update_string_referencing_resource]": {
+ "last_validated_date": "2025-04-03T07:23:48+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_conditions": {
+ "last_validated_date": "2025-04-01T14:34:35+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_direct_update": {
+ "last_validated_date": "2025-04-01T08:32:30+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_dynamic_update": {
+ "last_validated_date": "2025-04-01T12:30:53+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_execute_with_ref": {
+ "last_validated_date": "2025-04-11T14:34:09+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_mappings_with_parameter_lookup": {
+ "last_validated_date": "2025-04-01T13:31:33+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_mappings_with_static_fields": {
+ "last_validated_date": "2025-04-01T13:20:50+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_parameter_changes": {
+ "last_validated_date": "2025-04-01T12:43:36+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_unrelated_changes_requires_replacement": {
+ "last_validated_date": "2025-04-01T16:46:22+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestCaptureUpdateProcess::test_unrelated_changes_update_propagation": {
+ "last_validated_date": "2025-04-01T16:40:03+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestUpdates::test_deleting_resource": {
+ "last_validated_date": "2025-04-15T15:07:18+00:00"
+ },
+ "tests/aws/services/cloudformation/api/test_changesets.py::TestUpdates::test_simple_update_two_resources": {
+ "last_validated_date": "2025-04-02T10:05:26+00:00"
+ },
"tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_update_without_parameters": {
"last_validated_date": "2022-05-31T07:32:02+00:00"
},
diff --git a/tests/aws/services/cloudformation/resources/test_apigateway.py b/tests/aws/services/cloudformation/resources/test_apigateway.py
index b5c33580aed1e..bdae534baf3c6 100644
--- a/tests/aws/services/cloudformation/resources/test_apigateway.py
+++ b/tests/aws/services/cloudformation/resources/test_apigateway.py
@@ -3,14 +3,17 @@
from operator import itemgetter
import requests
+from localstack_snapshot.snapshots.transformer import SortingTransformer
from localstack import constants
from localstack.aws.api.lambda_ import Runtime
+from localstack.testing.aws.util import is_aws_cloud
from localstack.testing.pytest import markers
from localstack.utils.common import short_uid
from localstack.utils.files import load_file
from localstack.utils.run import to_str
from localstack.utils.strings import to_bytes
+from localstack.utils.sync import retry
from tests.aws.services.apigateway.apigateway_fixtures import api_invoke_url
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -108,7 +111,24 @@ def test_cfn_apigateway_aws_integration(deploy_cfn_template, aws_client):
@markers.aws.validated
-def test_cfn_apigateway_swagger_import(deploy_cfn_template, echo_http_server_post, aws_client):
+@markers.snapshot.skip_snapshot_verify(
+ paths=[
+ # TODO: not returned by LS
+ "$..endpointConfiguration.ipAddressType",
+ ]
+)
+def test_cfn_apigateway_swagger_import(
+ deploy_cfn_template, echo_http_server_post, aws_client, snapshot
+):
+ snapshot.add_transformers_list(
+ [
+ snapshot.transform.key_value("aws:cloudformation:stack-name"),
+ snapshot.transform.resource_name(),
+ snapshot.transform.key_value("id"),
+ snapshot.transform.key_value("name"),
+ snapshot.transform.key_value("rootResourceId"),
+ ]
+ )
api_name = f"rest-api-{short_uid()}"
deploy_cfn_template(
template=TEST_TEMPLATE_1,
@@ -121,13 +141,25 @@ def test_cfn_apigateway_swagger_import(deploy_cfn_template, echo_http_server_pos
]
assert len(apis) == 1
api_id = apis[0]["id"]
+ snapshot.match("imported-api", apis[0])
# construct API endpoint URL
url = api_invoke_url(api_id, stage="dev", path="/test")
# invoke API endpoint, assert results
- result = requests.post(url, data="test 123")
- assert result.ok
+ def _invoke():
+ _result = requests.post(url, data="test 123")
+ assert _result.ok
+ return _result
+
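+ # on AWS, the deployed API can take a while to become available, so poll with longer delays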
+ if is_aws_cloud():
+ sleep = 2
+ retries = 20
+ else:
+ sleep = 0.1
+ retries = 3
+
+ result = retry(_invoke, sleep=sleep, retries=retries)
content = json.loads(to_str(result.content))
assert content["data"] == "test 123"
assert content["url"].endswith("/post")
@@ -301,12 +333,16 @@ def test_cfn_deploy_apigateway_integration(deploy_cfn_template, snapshot, aws_cl
"$.get-stage.lastUpdatedDate",
"$.get-stage.methodSettings",
"$.get-stage.tags",
+ "$..endpointConfiguration.ipAddressType",
]
)
def test_cfn_deploy_apigateway_from_s3_swagger(
deploy_cfn_template, snapshot, aws_client, s3_bucket
):
snapshot.add_transformer(snapshot.transform.key_value("deploymentId"))
+ # FIXME: we need to sort the binaryMediaTypes as we don't return it in the same order as AWS, but this does not have
+ # behavior incidence
+ snapshot.add_transformer(SortingTransformer("binaryMediaTypes"))
# put the swagger file in S3
swagger_template = load_file(
os.path.join(os.path.dirname(__file__), "../../../files/pets.json")
@@ -344,7 +380,20 @@ def test_cfn_deploy_apigateway_from_s3_swagger(
@markers.aws.validated
-def test_cfn_apigateway_rest_api(deploy_cfn_template, aws_client):
+@markers.snapshot.skip_snapshot_verify(
+ paths=["$..endpointConfiguration.ipAddressType"],
+)
+def test_cfn_apigateway_rest_api(deploy_cfn_template, aws_client, snapshot):
+ snapshot.add_transformers_list(
+ [
+ snapshot.transform.key_value("aws:cloudformation:logical-id"),
+ snapshot.transform.key_value("aws:cloudformation:stack-name"),
+ snapshot.transform.resource_name(),
+ snapshot.transform.key_value("id"),
+ snapshot.transform.key_value("rootResourceId"),
+ ]
+ )
+
stack = deploy_cfn_template(
template_path=os.path.join(os.path.dirname(__file__), "../../../templates/apigateway.json")
)
@@ -362,6 +411,7 @@ def test_cfn_apigateway_rest_api(deploy_cfn_template, aws_client):
rs = aws_client.apigateway.get_rest_apis()
apis = [item for item in rs["items"] if item["name"] == "DemoApi_dev"]
assert len(apis) == 1
+ snapshot.match("rest-api", apis[0])
rs = aws_client.apigateway.get_models(restApiId=apis[0]["id"])
assert len(rs["items"]) == 3
diff --git a/tests/aws/services/cloudformation/resources/test_apigateway.snapshot.json b/tests/aws/services/cloudformation/resources/test_apigateway.snapshot.json
index 84ff13f4d5db1..446cef02dea60 100644
--- a/tests/aws/services/cloudformation/resources/test_apigateway.snapshot.json
+++ b/tests/aws/services/cloudformation/resources/test_apigateway.snapshot.json
@@ -107,13 +107,20 @@
}
},
"tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_deploy_apigateway_from_s3_swagger": {
- "recorded-date": "24-09-2024, 20:22:38",
+ "recorded-date": "06-05-2025, 18:31:54",
"recorded-content": {
"rest-api": {
"apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "application/pdf",
+ "image/gif",
+ "image/jpg",
+ "image/png"
+ ],
"createdDate": "datetime",
"disableExecuteApiEndpoint": false,
"endpointConfiguration": {
+ "ipAddressType": "ipv4",
"types": [
"REGIONAL"
]
@@ -669,5 +676,61 @@
}
}
}
+ },
+ "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_swagger_import": {
+ "recorded-date": "05-05-2025, 14:23:13",
+ "recorded-content": {
+ "imported-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "*/*"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "",
+ "rootResourceId": "",
+ "tags": {
+ "aws:cloudformation:logical-id": "Api",
+ "aws:cloudformation:stack-id": "arn::cloudformation::111111111111:stack//",
+ "aws:cloudformation:stack-name": ""
+ },
+ "version": "1.0"
+ }
+ }
+ },
+ "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_rest_api": {
+ "recorded-date": "05-05-2025, 14:50:14",
+ "recorded-content": {
+ "rest-api": {
+ "apiKeySource": "HEADER",
+ "binaryMediaTypes": [
+ "image/jpg",
+ "image/png"
+ ],
+ "createdDate": "datetime",
+ "disableExecuteApiEndpoint": false,
+ "endpointConfiguration": {
+ "ipAddressType": "ipv4",
+ "types": [
+ "EDGE"
+ ]
+ },
+ "id": "",
+ "name": "DemoApi_dev",
+ "rootResourceId": "",
+ "tags": {
+ "aws:cloudformation:logical-id": "",
+ "aws:cloudformation:stack-id": "arn::cloudformation::111111111111:stack//",
+ "aws:cloudformation:stack-name": ""
+ }
+ }
+ }
}
}
diff --git a/tests/aws/services/cloudformation/resources/test_apigateway.validation.json b/tests/aws/services/cloudformation/resources/test_apigateway.validation.json
index e19c16876c071..4fb5cf01a3874 100644
--- a/tests/aws/services/cloudformation/resources/test_apigateway.validation.json
+++ b/tests/aws/services/cloudformation/resources/test_apigateway.validation.json
@@ -6,10 +6,13 @@
"last_validated_date": "2024-04-15T22:59:53+00:00"
},
"tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_rest_api": {
- "last_validated_date": "2024-06-25T18:12:55+00:00"
+ "last_validated_date": "2025-05-05T14:50:14+00:00"
+ },
+ "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_swagger_import": {
+ "last_validated_date": "2025-05-05T14:23:13+00:00"
},
"tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_deploy_apigateway_from_s3_swagger": {
- "last_validated_date": "2024-09-24T20:22:37+00:00"
+ "last_validated_date": "2025-05-06T18:31:53+00:00"
},
"tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_deploy_apigateway_integration": {
"last_validated_date": "2024-02-21T12:54:34+00:00"
diff --git a/tests/aws/services/cloudformation/resources/test_ec2.py b/tests/aws/services/cloudformation/resources/test_ec2.py
index fd02a304130ce..84928dc37c21b 100644
--- a/tests/aws/services/cloudformation/resources/test_ec2.py
+++ b/tests/aws/services/cloudformation/resources/test_ec2.py
@@ -155,6 +155,8 @@ def test_dhcp_options(aws_client, deploy_cfn_template, snapshot):
"$..Tags",
"$..Options.AssociationDefaultRouteTableId",
"$..Options.PropagationDefaultRouteTableId",
+        "$..Options.TransitGatewayCidrBlocks",  # Moto returns an empty list here, but AWS does not
+ "$..Options.SecurityGroupReferencingSupport", # not supported by Moto
]
)
def test_transit_gateway_attachment(deploy_cfn_template, aws_client, snapshot):
diff --git a/tests/aws/services/cloudformation/resources/test_ec2.snapshot.json b/tests/aws/services/cloudformation/resources/test_ec2.snapshot.json
index 024a531d45896..0f42548858457 100644
--- a/tests/aws/services/cloudformation/resources/test_ec2.snapshot.json
+++ b/tests/aws/services/cloudformation/resources/test_ec2.snapshot.json
@@ -91,7 +91,7 @@
}
},
"tests/aws/services/cloudformation/resources/test_ec2.py::test_transit_gateway_attachment": {
- "recorded-date": "28-03-2024, 06:48:11",
+ "recorded-date": "08-04-2025, 10:51:02",
"recorded-content": {
"attachment": {
"Association": {
@@ -125,6 +125,7 @@
"DnsSupport": "enable",
"MulticastSupport": "disable",
"PropagationDefaultRouteTableId": "",
+ "SecurityGroupReferencingSupport": "disable",
"VpnEcmpSupport": "enable"
},
"OwnerId": "111111111111",
diff --git a/tests/aws/services/cloudformation/resources/test_ec2.validation.json b/tests/aws/services/cloudformation/resources/test_ec2.validation.json
index e9b8da44359c4..6eb9f2caf3324 100644
--- a/tests/aws/services/cloudformation/resources/test_ec2.validation.json
+++ b/tests/aws/services/cloudformation/resources/test_ec2.validation.json
@@ -24,7 +24,7 @@
"last_validated_date": "2024-07-01T20:10:52+00:00"
},
"tests/aws/services/cloudformation/resources/test_ec2.py::test_transit_gateway_attachment": {
- "last_validated_date": "2024-03-28T06:48:11+00:00"
+ "last_validated_date": "2025-04-08T10:51:02+00:00"
},
"tests/aws/services/cloudformation/resources/test_ec2.py::test_vpc_creates_default_sg": {
"last_validated_date": "2024-04-01T11:21:54+00:00"
diff --git a/tests/aws/services/cloudformation/resources/test_lambda.py b/tests/aws/services/cloudformation/resources/test_lambda.py
index 527a3321540ba..532ea5a11436d 100644
--- a/tests/aws/services/cloudformation/resources/test_lambda.py
+++ b/tests/aws/services/cloudformation/resources/test_lambda.py
@@ -253,6 +253,38 @@ def test_lambda_alias(deploy_cfn_template, snapshot, aws_client):
snapshot.match("Alias", alias)
+@markers.aws.validated
+def test_lambda_logging_config(deploy_cfn_template, snapshot, aws_client):
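+    """Deploy a Lambda function with a LoggingConfig via CloudFormation and snapshot
+    both the stack resources and the logging configuration reported by GetFunction."""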
+ function_name = f"function{short_uid()}"
+
+ snapshot.add_transformer(snapshot.transform.cloudformation_api())
+ snapshot.add_transformer(SortingTransformer("StackResources", lambda x: x["LogicalResourceId"]))
+ snapshot.add_transformer(
+ snapshot.transform.key_value("LogicalResourceId", reference_replacement=False)
+ )
+ snapshot.add_transformer(
+ snapshot.transform.key_value("PhysicalResourceId", reference_replacement=False)
+ )
+ snapshot.add_transformer(snapshot.transform.regex(function_name, ""))
+
+ deployment = deploy_cfn_template(
+ template_path=os.path.join(
+ os.path.dirname(__file__), "../../../templates/cfn_lambda_logging_config.yaml"
+ ),
+ parameters={"FunctionName": function_name},
+ )
+
+ description = aws_client.cloudformation.describe_stack_resources(
+ StackName=deployment.stack_name
+ )
+ snapshot.match("stack_resource_descriptions", description)
+
+ logging_config = aws_client.lambda_.get_function(FunctionName=function_name)["Configuration"][
+ "LoggingConfig"
+ ]
+ snapshot.match("logging_config", logging_config)
+
+
@pytest.mark.skipif(
not in_default_partition(), reason="Test not applicable in non-default partitions"
)
diff --git a/tests/aws/services/cloudformation/resources/test_lambda.snapshot.json b/tests/aws/services/cloudformation/resources/test_lambda.snapshot.json
index c61888dca606a..d3e39608a2b41 100644
--- a/tests/aws/services/cloudformation/resources/test_lambda.snapshot.json
+++ b/tests/aws/services/cloudformation/resources/test_lambda.snapshot.json
@@ -1606,5 +1606,60 @@
"LayerVersionRef": "arn::lambda::111111111111:layer::1"
}
}
+ },
+ "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_logging_config": {
+ "recorded-date": "08-04-2025, 12:10:56",
+ "recorded-content": {
+ "stack_resource_descriptions": {
+ "StackResources": [
+ {
+ "DriftInformation": {
+ "StackResourceDriftStatus": "NOT_CHECKED"
+ },
+ "LogicalResourceId": "logical-resource-id",
+ "PhysicalResourceId": "physical-resource-id",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::Lambda::Function",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Timestamp": "timestamp"
+ },
+ {
+ "DriftInformation": {
+ "StackResourceDriftStatus": "NOT_CHECKED"
+ },
+ "LogicalResourceId": "logical-resource-id",
+ "PhysicalResourceId": "physical-resource-id",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::IAM::Role",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Timestamp": "timestamp"
+ },
+ {
+ "DriftInformation": {
+ "StackResourceDriftStatus": "NOT_CHECKED"
+ },
+ "LogicalResourceId": "logical-resource-id",
+ "PhysicalResourceId": "physical-resource-id",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::Lambda::Version",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Timestamp": "timestamp"
+ }
+ ],
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "logging_config": {
+ "ApplicationLogLevel": "INFO",
+ "LogFormat": "JSON",
+ "LogGroup": "/aws/lambda/",
+ "SystemLogLevel": "INFO"
+ }
+ }
}
}
diff --git a/tests/aws/services/cloudformation/resources/test_lambda.validation.json b/tests/aws/services/cloudformation/resources/test_lambda.validation.json
index 74611cffac904..910fd07381eec 100644
--- a/tests/aws/services/cloudformation/resources/test_lambda.validation.json
+++ b/tests/aws/services/cloudformation/resources/test_lambda.validation.json
@@ -41,6 +41,9 @@
"tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_layer_crud": {
"last_validated_date": "2024-12-20T18:23:31+00:00"
},
+ "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_logging_config": {
+ "last_validated_date": "2025-04-08T12:12:01+00:00"
+ },
"tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_version": {
"last_validated_date": "2024-04-09T07:21:37+00:00"
},
diff --git a/tests/aws/services/cloudformation/v2/test_change_set_conditions.py b/tests/aws/services/cloudformation/v2/test_change_set_conditions.py
new file mode 100644
index 0000000000000..9967f6cf4b607
--- /dev/null
+++ b/tests/aws/services/cloudformation/v2/test_change_set_conditions.py
@@ -0,0 +1,184 @@
+import pytest
+from localstack_snapshot.snapshots.transformer import RegexTransformer
+
+from localstack.services.cloudformation.v2.utils import is_v2_engine
+from localstack.testing.aws.util import is_aws_cloud
+from localstack.testing.pytest import markers
+from localstack.utils.strings import long_uid
+
+
+@pytest.mark.skipif(
+ condition=not is_v2_engine() and not is_aws_cloud(), reason="Requires the V2 engine"
+)
+@markers.snapshot.skip_snapshot_verify(
+ paths=[
+ "per-resource-events..*",
+ "delete-describe..*",
+ #
+        "$..ChangeSetId",  # an issue with the work-in-progress executor
+ # Before/After Context
+ "$..Capabilities",
+ "$..NotificationARNs",
+ "$..IncludeNestedStacks",
+ "$..Scope",
+ "$..Details",
+ "$..Parameters",
+ "$..Replacement",
+ "$..PolicyAction",
+ ]
+)
+class TestChangeSetConditions:
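+    """Test how change sets behave when CloudFormation Conditions add or remove
+    resources between two template versions. Each test delegates to the
+    capture_update_process fixture, which deploys the first template and then
+    creates, describes, and executes a change set for the second, snapshotting
+    every step."""
+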
+ @markers.aws.validated
+ @pytest.mark.skip(
+ reason=(
+            "Support for response parameters in the executor is still in progress; "
+            "deletion currently fails because the topic ARN is missing from the request"
+ )
+ )
+ def test_condition_update_removes_resource(
+ self,
+ snapshot,
+ capture_update_process,
+ ):
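+        """Flip the CreateTopic condition from true to false: the conditional
+        SNSTopic should be removed while the unconditional TopicPlaceholder is added."""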
+ name1 = f"topic-name-1-{long_uid()}"
+ name2 = f"topic-name-2-{long_uid()}"
+ snapshot.add_transformer(RegexTransformer(name1, "topic-name-1"))
+ snapshot.add_transformer(RegexTransformer(name2, "topic-name-2"))
+ template_1 = {
+ "Conditions": {"CreateTopic": {"Fn::Equals": ["true", "true"]}},
+ "Resources": {
+ "SNSTopic": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name1},
+ }
+ },
+ }
+ template_2 = {
+ "Conditions": {"CreateTopic": {"Fn::Equals": ["true", "false"]}},
+ "Resources": {
+ "SNSTopic": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name1},
+ },
+ "TopicPlaceholder": {
+ "Type": "AWS::SNS::Topic",
+ "Properties": {"TopicName": name2},
+ },
+ },
+ }
+ capture_update_process(snapshot, template_1, template_2)
+
+ @markers.aws.validated
+ def test_condition_update_adds_resource(
+ self,
+ snapshot,
+ capture_update_process,
+ ):
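+        """Flip the CreateTopic condition from false to true: the conditional
+        SNSTopic should be created alongside the pre-existing TopicPlaceholder."""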
+ name1 = f"topic-name-1-{long_uid()}"
+ name2 = f"topic-name-2-{long_uid()}"
+ snapshot.add_transformer(RegexTransformer(name1, "topic-name-1"))
+ snapshot.add_transformer(RegexTransformer(name2, "topic-name-2"))
+ template_1 = {
+ "Conditions": {"CreateTopic": {"Fn::Equals": ["true", "false"]}},
+ "Resources": {
+ "SNSTopic": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name1},
+ },
+ "TopicPlaceholder": {
+ "Type": "AWS::SNS::Topic",
+ "Properties": {"TopicName": name2},
+ },
+ },
+ }
+ template_2 = {
+ "Conditions": {"CreateTopic": {"Fn::Equals": ["true", "true"]}},
+ "Resources": {
+ "SNSTopic": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name1},
+ },
+ "TopicPlaceholder": {
+ "Type": "AWS::SNS::Topic",
+ "Properties": {"TopicName": name2},
+ },
+ },
+ }
+ capture_update_process(snapshot, template_1, template_2)
+
+ @markers.aws.validated
+ @pytest.mark.skip(
+        reason="Support for response parameters in the executor is still in progress; "
+        "deletion currently fails because the topic ARN is missing from the request"
+ )
+ def test_condition_add_new_negative_condition_to_existent_resource(
+ self,
+ snapshot,
+ capture_update_process,
+ ):
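+        """Attach a condition that evaluates to false to an existing resource:
+        the resource should be scheduled for removal."""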
+ name1 = f"topic-name-1-{long_uid()}"
+ name2 = f"topic-name-2-{long_uid()}"
+ snapshot.add_transformer(RegexTransformer(name1, "topic-name-1"))
+ snapshot.add_transformer(RegexTransformer(name2, "topic-name-2"))
+ template_1 = {
+ "Resources": {
+ "SNSTopic": {
+ "Type": "AWS::SNS::Topic",
+ "Properties": {"TopicName": name1},
+ },
+ },
+ }
+ template_2 = {
+ "Conditions": {"CreateTopic": {"Fn::Equals": ["true", "false"]}},
+ "Resources": {
+ "SNSTopic": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name1},
+ },
+ "TopicPlaceholder": {
+ "Type": "AWS::SNS::Topic",
+ "Properties": {"TopicName": name2},
+ },
+ },
+ }
+ capture_update_process(snapshot, template_1, template_2)
+
+ @markers.aws.validated
+ def test_condition_add_new_positive_condition_to_existent_resource(
+ self,
+ snapshot,
+ capture_update_process,
+ ):
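+        """Attach a condition that evaluates to true to an existing resource:
+        the resource should be kept and the new conditional SNSTopic2 created."""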
+ name1 = f"topic-name-1-{long_uid()}"
+ name2 = f"topic-name-2-{long_uid()}"
+ snapshot.add_transformer(RegexTransformer(name1, "topic-name-1"))
+ snapshot.add_transformer(RegexTransformer(name2, "topic-name-2"))
+ template_1 = {
+ "Resources": {
+ "SNSTopic1": {
+ "Type": "AWS::SNS::Topic",
+ "Properties": {"TopicName": name1},
+ },
+ },
+ }
+ template_2 = {
+ "Conditions": {"CreateTopic": {"Fn::Equals": ["true", "true"]}},
+ "Resources": {
+ "SNSTopic1": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name1},
+ },
+ "SNSTopic2": {
+ "Type": "AWS::SNS::Topic",
+ "Condition": "CreateTopic",
+ "Properties": {"TopicName": name2},
+ },
+ },
+ }
+ capture_update_process(snapshot, template_1, template_2)
diff --git a/tests/aws/services/cloudformation/v2/test_change_set_conditions.snapshot.json b/tests/aws/services/cloudformation/v2/test_change_set_conditions.snapshot.json
new file mode 100644
index 0000000000000..147c4f2eae447
--- /dev/null
+++ b/tests/aws/services/cloudformation/v2/test_change_set_conditions.snapshot.json
@@ -0,0 +1,1536 @@
+{
+ "tests/aws/services/cloudformation/v2/test_change_set_conditions.py::TestChangeSetConditions::test_condition_update_removes_resource": {
+ "recorded-date": "15-04-2025, 13:51:50",
+ "recorded-content": {
+ "create-change-set-1": {
+ "Id": "arn::cloudformation::111111111111:changeSet/",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "describe-change-set-1-prop-values": {
+ "Capabilities": [],
+ "ChangeSetId": "arn::cloudformation::111111111111:changeSet/",
+ "ChangeSetName": "",
+ "Changes": [
+ {
+ "ResourceChange": {
+ "Action": "Add",
+ "AfterContext": {
+ "Properties": {
+ "TopicName": "topic-name-1"
+ }
+ },
+ "Details": [],
+ "LogicalResourceId": "SNSTopic",
+ "Replacement": "True",
+ "ResourceType": "AWS::SNS::Topic",
+ "Scope": []
+ },
+ "Type": "Resource"
+ }
+ ],
+ "CreationTime": "datetime",
+ "ExecutionStatus": "AVAILABLE",
+ "IncludeNestedStacks": false,
+ "NotificationARNs": [],
+ "RollbackConfiguration": {},
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Status": "CREATE_COMPLETE",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "describe-change-set-1": {
+ "Capabilities": [],
+ "ChangeSetId": "arn::cloudformation::111111111111:changeSet/",
+ "ChangeSetName": "",
+ "Changes": [
+ {
+ "ResourceChange": {
+ "Action": "Add",
+ "Details": [],
+ "LogicalResourceId": "SNSTopic",
+ "ResourceType": "AWS::SNS::Topic",
+ "Scope": []
+ },
+ "Type": "Resource"
+ }
+ ],
+ "CreationTime": "datetime",
+ "ExecutionStatus": "AVAILABLE",
+ "IncludeNestedStacks": false,
+ "NotificationARNs": [],
+ "RollbackConfiguration": {},
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Status": "CREATE_COMPLETE",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "execute-change-set-1": {
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "post-create-1-describe": {
+ "ChangeSetId": "arn::cloudformation::111111111111:changeSet/",
+ "CreationTime": "datetime",
+ "DisableRollback": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ },
+ "EnableTerminationProtection": false,
+ "LastUpdatedTime": "datetime",
+ "NotificationARNs": [],
+ "RollbackConfiguration": {},
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "StackStatus": "CREATE_COMPLETE",
+ "Tags": []
+ },
+ "create-change-set-2": {
+ "Id": "arn::cloudformation::111111111111:changeSet/",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "describe-change-set-2-prop-values": {
+ "Capabilities": [],
+ "ChangeSetId": "arn::cloudformation::111111111111:changeSet/",
+ "ChangeSetName": "",
+ "Changes": [
+ {
+ "ResourceChange": {
+ "Action": "Remove",
+ "BeforeContext": {
+ "Properties": {
+ "TopicName": "topic-name-1"
+ }
+ },
+ "Details": [],
+ "LogicalResourceId": "SNSTopic",
+ "PhysicalResourceId": "arn::sns::111111111111:topic-name-1",
+ "PolicyAction": "Delete",
+ "ResourceType": "AWS::SNS::Topic",
+ "Scope": []
+ },
+ "Type": "Resource"
+ },
+ {
+ "ResourceChange": {
+ "Action": "Add",
+ "AfterContext": {
+ "Properties": {
+ "TopicName": "topic-name-2"
+ }
+ },
+ "Details": [],
+ "LogicalResourceId": "TopicPlaceholder",
+ "Replacement": "True",
+ "ResourceType": "AWS::SNS::Topic",
+ "Scope": []
+ },
+ "Type": "Resource"
+ }
+ ],
+ "CreationTime": "datetime",
+ "ExecutionStatus": "AVAILABLE",
+ "IncludeNestedStacks": false,
+ "NotificationARNs": [],
+ "RollbackConfiguration": {},
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Status": "CREATE_COMPLETE",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "describe-change-set-2": {
+ "Capabilities": [],
+ "ChangeSetId": "arn::cloudformation::111111111111:changeSet/",
+ "ChangeSetName": "",
+ "Changes": [
+ {
+ "ResourceChange": {
+ "Action": "Remove",
+ "Details": [],
+ "LogicalResourceId": "SNSTopic",
+ "PhysicalResourceId": "arn::sns::111111111111:topic-name-1",
+ "PolicyAction": "Delete",
+ "ResourceType": "AWS::SNS::Topic",
+ "Scope": []
+ },
+ "Type": "Resource"
+ },
+ {
+ "ResourceChange": {
+ "Action": "Add",
+ "Details": [],
+ "LogicalResourceId": "TopicPlaceholder",
+ "ResourceType": "AWS::SNS::Topic",
+ "Scope": []
+ },
+ "Type": "Resource"
+ }
+ ],
+ "CreationTime": "datetime",
+ "ExecutionStatus": "AVAILABLE",
+ "IncludeNestedStacks": false,
+ "NotificationARNs": [],
+ "RollbackConfiguration": {},
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Status": "CREATE_COMPLETE",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "execute-change-set-2": {
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "post-create-2-describe": {
+ "ChangeSetId": "arn::cloudformation::111111111111:changeSet/",
+ "CreationTime": "datetime",
+ "DisableRollback": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ },
+ "EnableTerminationProtection": false,
+ "LastUpdatedTime": "datetime",
+ "NotificationARNs": [],
+ "RollbackConfiguration": {},
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "StackStatus": "UPDATE_COMPLETE",
+ "Tags": []
+ },
+ "per-resource-events": {
+ "SNSTopic": [
+ {
+ "EventId": "SNSTopic-c494ee19-3e85-4cf7-b823-5b706137c086",
+ "LogicalResourceId": "SNSTopic",
+ "PhysicalResourceId": "arn::sns::111111111111:topic-name-1",
+ "ResourceStatus": "DELETE_COMPLETE",
+ "ResourceType": "AWS::SNS::Topic",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Timestamp": "timestamp"
+ },
+ {
+ "EventId": "SNSTopic-f1a45cee-c917-4856-9b04-fdfa3d210cf3",
+ "LogicalResourceId": "SNSTopic",
+ "PhysicalResourceId": "arn::sns::111111111111:topic-name-1",
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceType": "AWS::SNS::Topic",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Timestamp": "timestamp"
+ },
+ {
+ "EventId": "SNSTopic-CREATE_COMPLETE-date",
+ "LogicalResourceId": "SNSTopic",
+ "PhysicalResourceId": "arn::sns::111111111111:topic-name-1",
+ "ResourceProperties": {
+ "TopicName": "topic-name-1"
+ },
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::SNS::Topic",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "",
+ "Timestamp": "timestamp"
+ },
+ {
+ "EventId": "SNSTopic-CREATE_IN_PROGRESS-date",
+ "LogicalResourceId": "SNSTopic",
+ "PhysicalResourceId": "arn::sns::111111111111:topic-name-1",
+ "ResourceProperties": {
+ "TopicName": "topic-name-1"
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceType": "AWS::SNS::Topic",
+ "StackId": "arn::cloudformation::111111111111:stack//",
+ "StackName": "