From d85c56851b48185814c0d249f9ed62349518d1d2 Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Mon, 23 Jun 2025 21:30:29 -0700 Subject: [PATCH 1/8] 1/2 services pages done + capabilities, enterprise, getting started, integrations --- .../chaos-engineering/chaos-api.md | 2 +- .../docs/aws/capabilities/config/podman.md | 2 +- .../networking/accessing-endpoint-url.mdx | 4 +-- .../networking/external-port-range.mdx | 2 +- .../custom-tls-certificates.mdx | 4 +-- .../security-testing/explainable-iam.md | 4 +-- .../security-testing/iam-policy-stream.md | 2 +- .../state-management/cloud-pods.mdx | 6 ++-- .../state-management/persistence.mdx | 2 +- .../docs/aws/enterprise/k8s-operator.md | 2 +- src/content/docs/aws/getting-started/faq.mdx | 4 +-- .../docs/aws/getting-started/installation.mdx | 4 +-- .../docs/aws/getting-started/quickstart.mdx | 12 +++---- .../integrations/app-frameworks/quarkus.md | 4 +-- .../app-frameworks/serverless-framework.md | 6 ++-- .../app-frameworks/spring-cloud-function.mdx | 12 +++---- .../integrations/aws-native-tools/aws-sam.md | 2 +- .../docs/aws/integrations/aws-sdks/cpp.md | 2 +- .../docs/aws/integrations/aws-sdks/go.mdx | 4 +-- .../docs/aws/integrations/aws-sdks/java.mdx | 16 +++++----- .../aws/integrations/aws-sdks/javascript.mdx | 4 +-- .../docs/aws/integrations/aws-sdks/net.md | 4 +-- .../docs/aws/integrations/aws-sdks/php.md | 2 +- .../aws/integrations/aws-sdks/python-boto3.md | 2 +- .../docs/aws/integrations/aws-sdks/ruby.md | 2 +- .../integrations/containers/devcontainers.mdx | 10 +++--- .../aws/integrations/containers/gitpod.md | 2 +- .../integrations/containers/kubernetes.mdx | 4 +-- .../containers/rancher-desktop.mdx | 4 +-- .../continuous-integration/bitbucket.md | 4 +-- .../continuous-integration/circleci.md | 24 +++++++------- .../continuous-integration/codebuild.md | 28 ++++++++-------- .../continuous-integration/github-actions.md | 14 ++++---- .../continuous-integration/gitlab-ci.md | 16 +++++----- .../continuous-integration/travis-ci.md | 4 +-- .../infrastructure-as-code/cloud-custodian.md | 2 +- .../infrastructure-as-code/crossplane.md | 2 +- .../infrastructure-as-code/terraform.mdx | 32 +++++++++---------- .../messaging/selfmanaged-kafka-cluster.md | 10 +++--- .../testing/lambdatest-hyperexecute.md | 6 ++-- .../integrations/testing/testcontainers.mdx | 10 +++--- src/content/docs/aws/services/account.mdx | 6 ++-- src/content/docs/aws/services/acm.mdx | 2 +- src/content/docs/aws/services/apacheflink.mdx | 8 ++--- src/content/docs/aws/services/apigateway.mdx | 16 +++++----- .../docs/aws/services/appautoscaling.mdx | 2 +- src/content/docs/aws/services/appconfig.mdx | 2 +- src/content/docs/aws/services/appsync.mdx | 2 +- src/content/docs/aws/services/athena.mdx | 2 +- src/content/docs/aws/services/backup.mdx | 4 +-- .../docs/aws/services/cloudformation.mdx | 4 +-- 51 files changed, 165 insertions(+), 165 deletions(-) diff --git a/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md b/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md index 636f0edb..7435897a 100644 --- a/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md +++ b/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md @@ -49,7 +49,7 @@ When active, rules are evaluated sequentially on every request to LocalStack unt The schema for the configuration is as follows. -```json +```json showLineNumbers [ { "region": "(str) Region name, e.g. 'ap-south-1'. 
If omitted, all regions are affected.", diff --git a/src/content/docs/aws/capabilities/config/podman.md b/src/content/docs/aws/capabilities/config/podman.md index 86582a71..4a1bd611 100644 --- a/src/content/docs/aws/capabilities/config/podman.md +++ b/src/content/docs/aws/capabilities/config/podman.md @@ -101,7 +101,7 @@ podman machine set --rootful For the Docker Compose setup, use the following configuration. When running in rootless mode, ensure to comment out the HTTPS gateway port, as it is unable to bind to privileged ports below 1024. -```yaml +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx b/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx index ec94da76..f2c35196 100644 --- a/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx +++ b/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx @@ -130,7 +130,7 @@ docker run --rm -it --dns 172.27.0.2 --network ls ``` -```yaml +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -200,7 +200,7 @@ docker run --rm it --network my-network ``` -```yaml +```yaml showLineNumbers services: localstack: # other configuration here diff --git a/src/content/docs/aws/capabilities/networking/external-port-range.mdx b/src/content/docs/aws/capabilities/networking/external-port-range.mdx index c75c753b..43be5dd8 100644 --- a/src/content/docs/aws/capabilities/networking/external-port-range.mdx +++ b/src/content/docs/aws/capabilities/networking/external-port-range.mdx @@ -52,7 +52,7 @@ GATEWAY_LISTEN=0.0.0.0:4766 EXTERNAL_SERVICE_PORTS_START=4710 EXTERNAL_SERVICE_P ``` -```yaml +```yaml showLineNumbers services: localstack-main-1: container_name: localstack-main-1 diff --git a/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx b/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx index f05da937..02abe182 100644 --- a/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx +++ b/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx @@ -34,7 +34,7 @@ If you run LocalStack in a docker container (which includes using [the CLI](/aws Create a `Dockerfile` containing the following commands: -```yaml +```yaml showLineNumbers FROM localstack/localstack:latest # or if using the pro image: FROM localstack/localstack-pro:latest @@ -74,7 +74,7 @@ docker run ``` -```yaml +```yaml showLineNumbers services: localstack: image: diff --git a/src/content/docs/aws/capabilities/security-testing/explainable-iam.md b/src/content/docs/aws/capabilities/security-testing/explainable-iam.md index 9b311799..d8c0f950 100644 --- a/src/content/docs/aws/capabilities/security-testing/explainable-iam.md +++ b/src/content/docs/aws/capabilities/security-testing/explainable-iam.md @@ -28,7 +28,7 @@ However we have not included the `iam:PassRole` permission, and we will use the Create a policy document named `policy_1.json` and add the following content: -```json +```json showLineNumbers { "Version": "2012-10-17", "Statement": [ @@ -113,7 +113,7 @@ You can incorporate this action into the policy. For illustrative purposes, we will keep the example straightforward, using the same wildcard resource. 
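After you make the edit shown next, re-attach the updated document so the change takes effect; a minimal sketch with hypothetical names (`test-user`, `policy1`), assuming the policy was attached as an inline user policy:

```bash
# Hypothetical names: re-upload the edited policy_1.json so the new
# iam:PassRole statement is actually evaluated for the restricted user.
awslocal iam put-user-policy \
  --user-name test-user \
  --policy-name policy1 \
  --policy-document file://policy_1.json
```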
Edit the `policy_1.json` file to include the `iam:PassRole` action: -```json +```json showLineNumbers { "Version": "2012-10-17", "Statement": [ diff --git a/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md b/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md index 705bd435..b245c1f9 100644 --- a/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md +++ b/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md @@ -57,7 +57,7 @@ awslocal sns create-topic --name test-topic In the other tab, the required policy will be generated. This policy can then be attached to an IAM role, enabling it to create the resource. -```bash +```bash showLineNumbers Attached to identity: "arn:aws:iam::000000000000:root" Policy: diff --git a/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx b/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx index ff928163..2b64865a 100644 --- a/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx +++ b/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx @@ -298,7 +298,7 @@ AUTO_LOAD_POD=foo-pod localstack start ``` -```yaml +```yaml showLineNumbers services: localstack: container_name: "localstack-main" @@ -352,7 +352,7 @@ LocalStack, upon mounting `init-pods.d` to the appropriate location, will sequen The docker compose file for correctly mounting `init-pods.d` will look like: -```yaml +```yaml showLineNumbers services: localstack: container_name: "localstack-main" @@ -498,7 +498,7 @@ With such a configuration, the `foo-pod` Cloud Pod will be loaded from the `bar- To properly configure the remote, you need to provide the needed environment variables when starting the LocalStack container. For instance, a S3 remote needs a `AWS_ACCESS_KEY` and a `AWS_SECRET_ACCESS_KEY`, as follows: -```yaml +```yaml showLineNumbers services: localstack: container_name: "localstack-main" diff --git a/src/content/docs/aws/capabilities/state-management/persistence.mdx b/src/content/docs/aws/capabilities/state-management/persistence.mdx index ad8a4232..2f4907c7 100644 --- a/src/content/docs/aws/capabilities/state-management/persistence.mdx +++ b/src/content/docs/aws/capabilities/state-management/persistence.mdx @@ -28,7 +28,7 @@ PERSISTENCE=1 localstack start ``` -```yaml +```yaml showLineNumbers image: localstack/localstack-pro environment: - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} diff --git a/src/content/docs/aws/enterprise/k8s-operator.md b/src/content/docs/aws/enterprise/k8s-operator.md index 53450489..a24dcd7d 100644 --- a/src/content/docs/aws/enterprise/k8s-operator.md +++ b/src/content/docs/aws/enterprise/k8s-operator.md @@ -22,7 +22,7 @@ kubectl apply -f https://raw.githubusercontent.com/localstack/localstack-k8s-ope You can then deploy a LocalStack instance by storing the following file content as `localstack.yml` and applying it against the cluster via `kubectl apply -f localstack.yml`. 
-```bash +```bash showLineNumbers apiVersion: api.localstack.cloud/v1alpha1 kind: LocalStack metadata: diff --git a/src/content/docs/aws/getting-started/faq.mdx b/src/content/docs/aws/getting-started/faq.mdx index b08ab24a..64c59c7a 100644 --- a/src/content/docs/aws/getting-started/faq.mdx +++ b/src/content/docs/aws/getting-started/faq.mdx @@ -235,7 +235,7 @@ To do so, you need to change the [`docker-compose.yml`](https://github.com/local -```yaml +```yaml showLineNumbers volumes: - "/var/run/docker.sock:/var/run/docker.sock" - "\\\\wsl$\\\\home\\\\volume:/var/lib/localstack" # mount volume in WSL2 Linux file system @@ -246,7 +246,7 @@ To do so, you need to change the [`docker-compose.yml`](https://github.com/local As an alternative, you can set the volume as `- "~/volume:/var/lib/localstack"` then start Docker using command `wsl docker compose -f docker-compose.yml up`. -```yaml +```yaml showLineNumbers volumes: - "/var/run/docker.sock:/var/run/docker.sock" - "localstack_data:/var/lib/localstack" # mount Docker volume diff --git a/src/content/docs/aws/getting-started/installation.mdx b/src/content/docs/aws/getting-started/installation.mdx index cd05ca4f..8e74b350 100644 --- a/src/content/docs/aws/getting-started/installation.mdx +++ b/src/content/docs/aws/getting-started/installation.mdx @@ -246,7 +246,7 @@ Docker Compose v1.9.0 and above is supported. -```yaml +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -264,7 +264,7 @@ services: -```yaml +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/getting-started/quickstart.mdx b/src/content/docs/aws/getting-started/quickstart.mdx index eb27a1db..92ab4b69 100644 --- a/src/content/docs/aws/getting-started/quickstart.mdx +++ b/src/content/docs/aws/getting-started/quickstart.mdx @@ -169,7 +169,7 @@ awslocal sns subscribe \ #### Create the Presign Lambda -```bash +```bash showLineNumbers (cd lambdas/presign; rm -f lambda.zip; zip lambda.zip handler.py) awslocal lambda create-function \ --function-name presign \ @@ -187,7 +187,7 @@ $ awslocal lambda create-function-url-config \ #### Create the Image List Lambda -```bash +```bash showLineNumbers (cd lambdas/list; rm -f lambda.zip; zip lambda.zip handler.py) awslocal lambda create-function \ --function-name list \ @@ -207,7 +207,7 @@ $ awslocal lambda create-function-url-config \ - ```bash + ```bash showLineNumbers cd lambdas/resize rm -rf libs lambda.zip docker run --platform linux/x86_64 -v "$PWD":/var/task "public.ecr.aws/sam/build-python3.11" /bin/sh -c "pip install -r requirements.txt -t libs; exit" @@ -218,7 +218,7 @@ $ awslocal lambda create-function-url-config \ ``` - ```bash + ```bash showLineNumbers cd lambdas/resize rm -rf package lambda.zip mkdir package @@ -230,7 +230,7 @@ $ awslocal lambda create-function-url-config \ ``` - ```bash + ```bash showLineNumbers cd lambdas/resize rm -rf package lambda.zip mkdir package @@ -245,7 +245,7 @@ $ awslocal lambda create-function-url-config \ #### Create the Image Resizer Lambda -```bash +```bash showLineNumbers awslocal lambda create-function \ --function-name resize \ --runtime python3.11 \ diff --git a/src/content/docs/aws/integrations/app-frameworks/quarkus.md b/src/content/docs/aws/integrations/app-frameworks/quarkus.md index 60ee3bb0..08413f6d 100644 --- a/src/content/docs/aws/integrations/app-frameworks/quarkus.md +++ b/src/content/docs/aws/integrations/app-frameworks/quarkus.md @@ -30,7 
+30,7 @@ The Lambda extension is based on [AWS Java SDK 2.x](https://docs.aws.amazon.com/ Create a new project with the following command: -```bash +```bash showLineNumbers mvn io.quarkus.platform:quarkus-maven-plugin:3.6.3:create \ -DprojectGroupId=org.acme \ -DprojectArtifactId=amazon-lambda-quickstart \ @@ -72,7 +72,7 @@ Add the following dependencies to the `pom.xml` file: To configure LocalStack, add the following properties to the `application.properties` file: -```bash +```bash showLineNumbers quarkus.lambda.endpoint-override=http://localhost:4566 quarkus.lambda.aws.region=us-east-1 diff --git a/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md b/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md index f53921aa..6237ff8b 100644 --- a/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md +++ b/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md @@ -26,7 +26,7 @@ This guide assumes that you have the following tools installed. It also assumes that you already have a Serverless app set up consisting of a couple of Lambda functions and a `serverless.yml` file similar to the following. An example Serverless app integrated with LocalStack can be found here: Simple REST API using the Serverless Framework and LocalStack -```yaml +```yaml showLineNumbers service: my-service frameworkVersion: ">=1.1.0 <=2.50.0" @@ -115,7 +115,7 @@ Hence, you need to configure the Lambda functions to use the `AWS_ENDPOINT_URL` In Python, this may look something like. The code detects if it is running in LocalStack by checking if the `AWS_ENDPOINT_URL` variable exists and then configures the endpoint URL accordingly. -```python +```python showLineNumbers ... if 'AWS_ENDPOINT_URL' in os.environ: dynamodb = boto3.resource('dynamodb', endpoint_url=os.environ['AWS_ENDPOINT_URL']) @@ -182,7 +182,7 @@ Use the displayed endpoint `http://localhost:4566/restapis/XXXXXXXXXX/local/_use serverless-localstack supports a feature for lambda functions that allows local code mounting: -```yaml +```yaml showLineNumbers # serverless.yml custom: diff --git a/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx b/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx index 08803029..91bbbe73 100644 --- a/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx +++ b/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx @@ -210,7 +210,7 @@ In this project, we are following [official documentation](https://docs.aws.amazon.com/lambda/latest/dg/java-logging.html#java-wt-logging-using-log4j2.8) to setup up `src/main/resources/log4j2.xml` content. -```xml title="log4j2.xml" +```xml title="log4j2.xml" showLineNumbers ?xml version="1.0" encoding="UTF-8"?> @@ -276,7 +276,7 @@ Let's configure it by creating a new configuration class `JacksonConfiguration.k `src/main/kotlin/org/localstack/sampleproject/config`: -```kotlin title="JacksonConfiguration.kt" +```kotlin title="JacksonConfiguration.kt" showLineNumbers package org.localstack.sampleproject.config import com.fasterxml.jackson.annotation.JsonInclude @@ -319,7 +319,7 @@ implementations. Let's create a small logging utility to simplify interactions with the logger -```kotlin title="Logger.kt" +```kotlin title="Logger.kt" showLineNumbers package org.localstack.sampleproject.util import org.apache.logging.log4j.LogManager @@ -575,7 +575,7 @@ for usage examples. 
-```yaml +```yaml showLineNumbers service: localstack-sampleproject-serverless provider: @@ -628,7 +628,7 @@ functions: ``` -```java title="ApplicationStack.kt" +```java title="ApplicationStack.kt" showLineNumbers package org.localstack.cdkstack import java.util.UUID @@ -703,7 +703,7 @@ class ApplicationStack(parent: Construct, name: String) : Stack(parent, name) { ``` -```hcl title="variables.tf" +```hcl title="variables.tf" showLineNumbers variable "STAGE" { type = string default = "local" diff --git a/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md b/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md index 8ea2e3c7..5b6be787 100644 --- a/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md +++ b/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md @@ -69,7 +69,7 @@ The `samlocal` wrapper will package and deploy the application to LocalStack. To debug your Lambda functions in VS Code while using the SAM CLI's `sam local` command alongside other services provided by LocalStack, set up a launch configuration in the `.vscode/launch.json` file. Insert the following settings into the file: -```json +```json showLineNumbers { "type": "aws-sam", "request": "direct-invoke", diff --git a/src/content/docs/aws/integrations/aws-sdks/cpp.md b/src/content/docs/aws/integrations/aws-sdks/cpp.md index 4402f359..5f99243f 100644 --- a/src/content/docs/aws/integrations/aws-sdks/cpp.md +++ b/src/content/docs/aws/integrations/aws-sdks/cpp.md @@ -15,7 +15,7 @@ which is the preferred way of integrating the C++ SDK with LocalStack. Consider the following example, which creates an SQS queue, sends a message to it, then receives the same message via the SDK: -```cpp +```cpp showLineNumbers #include #include #include diff --git a/src/content/docs/aws/integrations/aws-sdks/go.mdx b/src/content/docs/aws/integrations/aws-sdks/go.mdx index 6fff45dd..850804ac 100644 --- a/src/content/docs/aws/integrations/aws-sdks/go.mdx +++ b/src/content/docs/aws/integrations/aws-sdks/go.mdx @@ -24,7 +24,7 @@ Full examples for both SDK versions can be found [in our samples repository](htt - ```go + ```go showLineNumbers package main import ( @@ -52,7 +52,7 @@ func main() { -```go +```go showLineNumbers package main import ( diff --git a/src/content/docs/aws/integrations/aws-sdks/java.mdx b/src/content/docs/aws/integrations/aws-sdks/java.mdx index 65f48995..1e5758b2 100644 --- a/src/content/docs/aws/integrations/aws-sdks/java.mdx +++ b/src/content/docs/aws/integrations/aws-sdks/java.mdx @@ -41,7 +41,7 @@ The client can be used to upload a file to an existing bucket and then retrieve -```java +```java showLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -57,7 +57,7 @@ AmazonS3 s3Client = AmazonS3ClientBuilder.standard() -```java +```java showLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -81,7 +81,7 @@ S3Client s3Client = S3Client.builder() -```java +```java showLineNumbers // Existing bucket name. 
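// Assumes the bucket already exists, e.g. created beforehand via 'awslocal s3 mb s3://records'.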
final String BUCKET_NAME = "records"; @@ -110,7 +110,7 @@ BufferedReader reader = new BufferedReader(new InputStreamReader(objectInputStre -```java +```java showLineNumbers // Existing bucket name. final String BUCKET_NAME = "records"; @@ -154,7 +154,7 @@ The full list of supported converters can be found [here](https://sdk.amazonaws. -```java +```java showLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -172,7 +172,7 @@ private static AmazonDynamoDB dynamoDBClient = AmazonDynamoDBClientBuilder.stand ``` -```java +```java showLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -206,7 +206,7 @@ DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() -```java +```java showLineNumbers // Existing table name String TABLE_NAME = "person"; @@ -246,7 +246,7 @@ person.setBirthdateFromString(item.getString("birthdate")); -```java +```java showLineNumbers // Existing table name. String TABLE_NAME = "person"; diff --git a/src/content/docs/aws/integrations/aws-sdks/javascript.mdx b/src/content/docs/aws/integrations/aws-sdks/javascript.mdx index 259eb8eb..4a819638 100644 --- a/src/content/docs/aws/integrations/aws-sdks/javascript.mdx +++ b/src/content/docs/aws/integrations/aws-sdks/javascript.mdx @@ -23,7 +23,7 @@ Here is an example of how to create a Lambda client and an S3 client with the en -```javascript +```javascript showLineNumbers const AWS = require('aws-sdk'); // Configure the AWS SDK to use the LocalStack endpoint and credentials @@ -70,7 +70,7 @@ s3.listBuckets((err, data) => { ``` -```javascript +```javascript showLineNumbers const { LambdaClient, ListFunctionsCommand } = require('@aws-sdk/client-lambda'); const { S3Client, ListBucketsCommand } = require('@aws-sdk/client-s3'); diff --git a/src/content/docs/aws/integrations/aws-sdks/net.md b/src/content/docs/aws/integrations/aws-sdks/net.md index f57803e5..238c0770 100644 --- a/src/content/docs/aws/integrations/aws-sdks/net.md +++ b/src/content/docs/aws/integrations/aws-sdks/net.md @@ -86,7 +86,7 @@ The library aims to reduce the boilerplate required to set up LocalStack clients #### Dependency Injection Approach -```csharp +```csharp showLineNumbers public void ConfigureServices(IServiceCollection services) { // Add framework services. @@ -104,7 +104,7 @@ var amazonS3Client = serviceProvider.GetRequiredService(); #### Standalone Approach -```csharp +```csharp showLineNumbers var sessionOptions = new SessionOptions(); var configOptions = new ConfigOptions(); diff --git a/src/content/docs/aws/integrations/aws-sdks/php.md b/src/content/docs/aws/integrations/aws-sdks/php.md index 3df40212..c0e82467 100644 --- a/src/content/docs/aws/integrations/aws-sdks/php.md +++ b/src/content/docs/aws/integrations/aws-sdks/php.md @@ -15,7 +15,7 @@ which is the preferred way of integrating the PHP SDK with LocalStack. Here is an example of how to create an `S3Client` with the endpoint set to LocalStack. 
-```php +```php showLineNumbers use Aws\S3\S3Client; use Aws\Exception\AwsException; diff --git a/src/content/docs/aws/integrations/aws-sdks/python-boto3.md b/src/content/docs/aws/integrations/aws-sdks/python-boto3.md index ce9a3136..453bf5eb 100644 --- a/src/content/docs/aws/integrations/aws-sdks/python-boto3.md +++ b/src/content/docs/aws/integrations/aws-sdks/python-boto3.md @@ -11,7 +11,7 @@ sidebar: You can easily create a `boto3` client that interacts with your LocalStack instance. The example below creates a `boto3` client that lists all available Lambda functions: -```python +```python showLineNumbers import boto3 endpoint_url = "http://localhost.localstack.cloud:4566" diff --git a/src/content/docs/aws/integrations/aws-sdks/ruby.md b/src/content/docs/aws/integrations/aws-sdks/ruby.md index 908f621b..bdf34428 100644 --- a/src/content/docs/aws/integrations/aws-sdks/ruby.md +++ b/src/content/docs/aws/integrations/aws-sdks/ruby.md @@ -89,7 +89,7 @@ The S3 service endpoint differs slightly from the other service endpoints becaus For alternative AWS services, you can use the following configuration: -```ruby +```ruby showLineNumbers region = "us-east-2" Aws.config.update( endpoint: 'http://localhost:4566', # update with localstack endpoint diff --git a/src/content/docs/aws/integrations/containers/devcontainers.mdx b/src/content/docs/aws/integrations/containers/devcontainers.mdx index d27d763b..08f3e981 100644 --- a/src/content/docs/aws/integrations/containers/devcontainers.mdx +++ b/src/content/docs/aws/integrations/containers/devcontainers.mdx @@ -39,7 +39,7 @@ Before you start, ensure that you have the [DevContainer CLI](https://code.visua Create a JSON file called `options.json` with the desired options in it. -```json +```json showLineNumbers { "imageVariant": "bullseye", "awslocal": "true", @@ -219,7 +219,7 @@ Before you start, ensure that you have the [DevContainer CLI](https://code.visu Create a JSON file called `options.json` with the desired options in it. -```json +```json showLineNumbers { "imageVariant": "bookworm", "awslocal": "true", @@ -337,7 +337,7 @@ To get started with LocalStack and DevContainers in VS Code, follow these steps: -```json +```json showLineNumbers { "name": "LocalStack DooD setup", "dockerComposeFile": "docker-compose.yml", @@ -362,7 +362,7 @@ To get started with LocalStack and DevContainers in VS Code, follow these steps: ``` -```yml +```yml showLineNumbers services: localstack: container_name: "localstack-main" @@ -413,7 +413,7 @@ FROM mcr.microsoft.com/devcontainers/base:bookworm ``` -```bash +```bash showLineNumbers # Activate LocalStack Pro: https://docs.localstack.cloud/getting-started/auth-token/ LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:-} # required for Pro, not processed via template due to security reasons LOCALSTACK_API_KEY=${LOCALSTACK_API_KEY:-} diff --git a/src/content/docs/aws/integrations/containers/gitpod.md b/src/content/docs/aws/integrations/containers/gitpod.md index d15f3fb8..89610df3 100644 --- a/src/content/docs/aws/integrations/containers/gitpod.md +++ b/src/content/docs/aws/integrations/containers/gitpod.md @@ -23,7 +23,7 @@ To configure LocalStack on Gitpod, you would need to set up a `.gitpod.yml` on t The file configures your workspace and the environment that you would like to use. You can find more information on the [Gitpod documentation](https://www.gitpod.io/docs/config-gitpod-file/). 
-```yaml +```yaml showLineNumbers tasks: - name: start-localstack env: diff --git a/src/content/docs/aws/integrations/containers/kubernetes.mdx b/src/content/docs/aws/integrations/containers/kubernetes.mdx index ec3d7d9d..9af64edc 100644 --- a/src/content/docs/aws/integrations/containers/kubernetes.mdx +++ b/src/content/docs/aws/integrations/containers/kubernetes.mdx @@ -68,7 +68,7 @@ You can use this chart with LocalStack Pro by: You can set these values in a YAML file (in this example `pro-values.yaml`): -```yaml +```yaml showLineNumbers image: repository: localstack/localstack-pro @@ -79,7 +79,7 @@ extraEnvVars: If you have the LocalStack Auth Token in a secret, you can also reference it directly with `extraEnvVars`: -```yaml +```yaml showLineNumbers extraEnvVars: - name: LOCALSTACK_AUTH_TOKEN valueFrom: diff --git a/src/content/docs/aws/integrations/containers/rancher-desktop.mdx b/src/content/docs/aws/integrations/containers/rancher-desktop.mdx index 0e74e0f6..80b6faf7 100644 --- a/src/content/docs/aws/integrations/containers/rancher-desktop.mdx +++ b/src/content/docs/aws/integrations/containers/rancher-desktop.mdx @@ -123,7 +123,7 @@ Modify your Docker Compose configuration to work with Rancher Desktop: -```yml +```yml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -141,7 +141,7 @@ services: ``` -```yml +```yml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/integrations/continuous-integration/bitbucket.md b/src/content/docs/aws/integrations/continuous-integration/bitbucket.md index 355a13b1..d73b2af7 100644 --- a/src/content/docs/aws/integrations/continuous-integration/bitbucket.md +++ b/src/content/docs/aws/integrations/continuous-integration/bitbucket.md @@ -22,7 +22,7 @@ When you want to integrate LocalStack into your job configuration, you just have The following example BitBucket Pipeline configuration (`bitbucket-pipelines.yaml`) executes these steps, creates a new S3 bucket, and queries the list of S3 buckets: -```yaml +```yaml showLineNumbers image: python:3.9 definitions: @@ -69,7 +69,7 @@ To add a CI Auth Token to your BitBucket Pipeline: Navigate to your BitBucket Pipeline and add the following lines to the `bitbucket-pipelines.yaml` file: -```yaml +```yaml showLineNumbers pipelines: default: - step: diff --git a/src/content/docs/aws/integrations/continuous-integration/circleci.md b/src/content/docs/aws/integrations/continuous-integration/circleci.md index f1e1359e..70823967 100644 --- a/src/content/docs/aws/integrations/continuous-integration/circleci.md +++ b/src/content/docs/aws/integrations/continuous-integration/circleci.md @@ -17,7 +17,7 @@ LocalStack supports CircleCI out of the box and can be easily integrated into yo #### Default -```yaml +```yaml showLineNumbers version: '2.1' orbs: localstack: localstack/platform@2.2 @@ -36,7 +36,7 @@ workflows: #### Async -```yaml +```yaml showLineNumbers version: '2.1' orbs: localstack: localstack/platform@2.2 @@ -61,7 +61,7 @@ Read more about the [configuration options](/aws/capabilities/config/configurati #### Job level -```yaml +```yaml showLineNumbers ... jobs: localstack-test: @@ -75,7 +75,7 @@ jobs: #### Shell command -```yaml +```yaml showLineNumbers ... jobs: localstack-test: @@ -105,7 +105,7 @@ After the above steps, just start up LocalStack using our official orb as usual. ### Dump LocalStack logs -```yaml +```yaml showLineNumbers ... 
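# A sketch of the approach: run the test steps as usual, then dump the
# LocalStack container logs and store them as a CircleCI build artifact.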
jobs: localstack-test: @@ -140,7 +140,7 @@ Update or create the Cloud Pod in it's own project (ie in a separate Infrastruct _Note: If there is a previously created Cloud Pod which doesn't need updating this step can be skipped._ -```yaml +```yaml showLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -168,7 +168,7 @@ workflows: In a separate project use the previously created base Cloud Pod as below: -```yaml +```yaml showLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -196,7 +196,7 @@ To use a dynamically updated Cloud Pod in multiple workflows but in the same pro Before you are able to use any stored artifacts in your pipeline, you must provide either a valid [project API token](https://circleci.com/docs/managing-api-tokens/#creating-a-project-api-token) or a [personal API token](https://circleci.com/docs/managing-api-tokens/#creating-a-personal-api-token) to CircleCI. -```yaml +```yaml showLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -275,7 +275,7 @@ Find out more about [Ephemeral Instances](/aws/capabilities/cloud-sandbox/epheme ##### Same job -```yaml +```yaml showLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -302,7 +302,7 @@ workflows: ##### Multiple jobs -```yaml +```yaml showLineNumbers ... jobs: setup-instance: @@ -350,7 +350,7 @@ workflows: This strategy persist LocalStack's state between jobs for the current workflow. -```yaml +```yaml showLineNumbers ... jobs: localstack-save-state: @@ -395,7 +395,7 @@ More information about Localstack's [state import/export](/aws/capabilities/stat To preserve state between workflow runs, you can take leverage of CircleCI's caching too. This strategy will persist LocalStack's state for every workflow re-runs, but not for different workflows. -```yaml +```yaml showLineNumbers ... jobs: localstack-update-state: diff --git a/src/content/docs/aws/integrations/continuous-integration/codebuild.md b/src/content/docs/aws/integrations/continuous-integration/codebuild.md index 20ee5a44..fdb75531 100644 --- a/src/content/docs/aws/integrations/continuous-integration/codebuild.md +++ b/src/content/docs/aws/integrations/continuous-integration/codebuild.md @@ -25,7 +25,7 @@ CodeBuild has the capability to use LocalStack's GitHub Action. #### Native Runner -```yml +```yml showLineNumbers version: 0.2 ... phases: @@ -39,7 +39,7 @@ phases: #### GitHub Actions Runner -```yml +```yml showLineNumbers version: 0.2 phases: @@ -60,7 +60,7 @@ Get know more about the LocalStack [config options](/aws/capabilities/config/con #### Native Runner -```yml +```yml showLineNumbers version: 0.2 env: @@ -73,7 +73,7 @@ phases: #### GitHub Actions Runner -```yml +```yml showLineNumbers version: 0.2 env: @@ -111,7 +111,7 @@ Navigate to the buildspec file and change the Docker image to `public.ecr.aws/lo #### Native Runner -```yaml +```yaml showLineNumbers ... phases: pre_build: @@ -123,7 +123,7 @@ phases: #### GitHub Actions Runner -```yml +```yml showLineNumbers ... phases: pre_build: @@ -140,7 +140,7 @@ phases: ### Dump LocalStack logs -```yaml +```yaml showLineNumbers ... artifacts: files: @@ -174,7 +174,7 @@ Find more information about cloud pods [here](/aws/capabilities/state-management ##### Native Runner -```yml +```yml showLineNumbers ... phases: pre_build: @@ -189,7 +189,7 @@ phases: ##### GitHub Actions Runner -```yml +```yml showLineNumbers ... phases: pre_build: @@ -214,7 +214,7 @@ phases: #### Ephemeral Instances (Preview) -```yml +```yml showLineNumbers ... 
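# Sketch: pre_build starts a short-lived, cloud-hosted LocalStack instance
# (Ephemeral Instances are in preview; exact CLI flags may differ by version).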
phases: pre_build: @@ -243,7 +243,7 @@ Find out more about [ephemeral instances](/aws/capabilities/cloud-sandbox/epheme Find out more about [state management](/aws/capabilities/state-management/export-import-state/). -```yml +```yml showLineNumbers ... phases: pre_build: @@ -259,7 +259,7 @@ artifact: Alternatively save as a secondary artifact: -```yml +```yml showLineNumbers ... artifact: ... @@ -278,7 +278,7 @@ Additional information about [state export and import](/aws/capabilities/state-m ##### Native Runner -```yml +```yml showLineNumbers ... phases: pre_build: @@ -295,7 +295,7 @@ cache: ##### GitHub Actions Runner -```yml +```yml showLineNumbers ... phases: pre_build: diff --git a/src/content/docs/aws/integrations/continuous-integration/github-actions.md b/src/content/docs/aws/integrations/continuous-integration/github-actions.md index e31687ad..e980ec64 100644 --- a/src/content/docs/aws/integrations/continuous-integration/github-actions.md +++ b/src/content/docs/aws/integrations/continuous-integration/github-actions.md @@ -12,7 +12,7 @@ This page contains easily customisable snippets to show you how to manage LocalS ### Start up Localstack -```yaml +```yaml showLineNumbers - name: Start LocalStack uses: LocalStack/setup-localstack@v0.2.2 with: @@ -25,7 +25,7 @@ This page contains easily customisable snippets to show you how to manage LocalS To set LocalStack configuration options, you can use the `configuration` input parameter. For example, to set the `DEBUG` configuration option, you can use the following configuration: -```yml +```yml showLineNumbers - name: Start LocalStack uses: LocalStack/setup-localstack@v0.2.2 with: @@ -50,7 +50,7 @@ Click **Add secret** to save your secret. You can then use our [`setup-localstack`](https://github.com/localstack/setup-localstack) GitHub Action to start your LocalStack container, with the `LOCALSTACK_AUTH_TOKEN` environment variable: -```yaml +```yaml showLineNumbers - name: Start LocalStack uses: LocalStack/setup-localstack@v0.2.3 with: @@ -63,7 +63,7 @@ You can then use our [`setup-localstack`](https://github.com/localstack/setup-lo ### Dump Localstack logs -```yaml +```yaml showLineNumbers - name: Show localstack logs run: | localstack logs | tee localstack.log @@ -75,7 +75,7 @@ You can preserve your AWS infrastructure with Localstack in various ways. #### Cloud Pods -```yaml +```yaml showLineNumbers ... # Localstack is up and running already - name: Load the Cloud Pod @@ -106,7 +106,7 @@ Our Github Action contains the prebuilt functionality to spin up an ephemeral in First you need to deploy the preview: -```yaml +```yaml showLineNumbers name: Create PR Preview on: @@ -141,7 +141,7 @@ Find out more about ephemeral instances [here](/aws/capabilities/cloud-sandbox/e #### Artifact -```yaml +```yaml showLineNumbers ... - name: Start LocalStack and Load State uses: LocalStack/setup-localstack@v0.2.2 diff --git a/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md b/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md index 81572dc1..e0556cc5 100644 --- a/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md +++ b/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md @@ -28,7 +28,7 @@ HOSTNAME_EXTERNAL: localhost.localstack.cloud. #### Service -```yaml +```yaml showLineNumbers ... 
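# Sketch: LocalStack runs as a GitLab CI service container; the variables
# below point Docker clients at the docker:dind service it relies on.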
variables: DOCKER_SOCK: tcp://docker:2375 @@ -46,7 +46,7 @@ services: #### Container -```yaml +```yaml showLineNumbers image: docker:latest stages: @@ -87,7 +87,7 @@ After you create the variable, you can use it in the `.gitlab-ci.yml` file. However, variables set in the GitLab UI are not automatically passed down to service containers. You need to assign them as variables in the UI, and then re-assign them in your `.gitlab-ci.yml`. -```yaml +```yaml showLineNumbers ... variables: LOCALSTACK_AUTH_TOKEN: $LOCALSTACK_AUTH_TOKEN @@ -103,7 +103,7 @@ If the CI Auth Token activation fails, LocalStack container will exit with an er ### Dump Localstack logs -```yaml +```yaml showLineNumbers ... job: variables: @@ -121,7 +121,7 @@ You can preserve your AWS infrastructure with Localstack in various ways. #### Artifact -```yaml +```yaml showLineNumbers ... job: before_script: @@ -140,7 +140,7 @@ More info about Localstack's state export and import [here](/aws/capabilities/st #### Cache -```yaml +```yaml showLineNumbers ... job: before_script: @@ -163,7 +163,7 @@ Additional information about state export and import [here](/aws/capabilities/st #### Cloud Pod -```yaml +```yaml showLineNumbers ... job: before_script: @@ -178,7 +178,7 @@ Find more information about cloud pods [here](/aws/capabilities/state-management #### Ephemeral Instance (Preview) -```yaml +```yaml showLineNumbers ... variables: LOCALSTACK_AUTH_TOKEN: $LOCALSTACK_AUTH_TOKEN diff --git a/src/content/docs/aws/integrations/continuous-integration/travis-ci.md b/src/content/docs/aws/integrations/continuous-integration/travis-ci.md index 893eb65e..f4fe2e12 100644 --- a/src/content/docs/aws/integrations/continuous-integration/travis-ci.md +++ b/src/content/docs/aws/integrations/continuous-integration/travis-ci.md @@ -19,7 +19,7 @@ When you want to integrate LocalStack into your job configuration, you just have The following example Travis CI job config (`.travis.yaml`) executes these steps, creates a new S3 bucket, and prints a nice message in the end: -```yaml +```yaml showLineNumbers language: python services: @@ -56,7 +56,7 @@ To configure this in Travis CI, go to the project settings (`More options` → ` Here is an example workflow: -```yaml +```yaml showLineNumbers before_install: # Install the LocalStack CLI and awslocal - python -m pip install localstack awscli-local[ver1] diff --git a/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md b/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md index a1420cec..f196840a 100644 --- a/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md +++ b/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md @@ -60,7 +60,7 @@ You can navigate to the LocalStack logs to verify that the EC2 instance was crea You can now create a Cloud Custodian policy to stop the EC2 instances with the tag `Custodian`. 
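Before writing the policy, make sure Custodian's underlying boto3 session targets LocalStack rather than real AWS; a minimal sketch, assuming your botocore version honors the standard endpoint environment variable:

```bash
# LocalStack accepts dummy credentials; AWS_ENDPOINT_URL is read by recent
# botocore releases, which Cloud Custodian uses under the hood.
export AWS_ACCESS_KEY_ID=test
export AWS_SECRET_ACCESS_KEY=test
export AWS_DEFAULT_REGION=us-east-1
export AWS_ENDPOINT_URL=http://localhost:4566
```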
Create a file named `custodian.yml` and add the following content: -```yaml +```yaml showLineNumbers policies: - name: my-first-policy resource: aws.ec2 diff --git a/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md b/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md index 80dfc19f..51cdfd73 100644 --- a/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md +++ b/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md @@ -116,7 +116,7 @@ EOF ``` Finally, we create an AWS `ProviderConfig` that references the secret created above, and defines a static `endpoint` pointing to the LocalStack URL `http://host.docker.internal:4566`: -```bash +```bash showLineNumbers cat < -```python +```python showLineNumbers AWS_CONFIG = { "region": "us-east-1", "endpoints": [ @@ -292,7 +292,7 @@ AWS_CONFIG = { ``` -```javascript +```javascript showLineNumbers export const AWS_CONFIG = { region: "us-east-1", endpoints: [ @@ -331,7 +331,7 @@ You can further import the above configuration in your project's code, and use i -```python +```python showLineNumbers ... from localstack_config import AWS_CONFIG ... @@ -340,7 +340,7 @@ AwsProvider(self, "Aws", **AWS_CONFIG) ``` -```javascript +```javascript showLineNumbers ... import { AWS_CONFIG } from "./localstack-config"; ... @@ -372,7 +372,7 @@ Create a new directory named `cdktf-localstack` and initialize a new CDKTF proje -```python +```python showLineNumbers $ cdktf init ... ? Do you want to continue with Terraform Cloud remote state management? No @@ -389,7 +389,7 @@ Note: You can always add providers using 'cdktf provider add' later on ``` -```javascript +```javascript showLineNumbers $ cdktf init ... ? Do you want to continue with Terraform Cloud remote state management? No @@ -426,7 +426,7 @@ Add the following code to import the AWS provider and create a new S3 bucket in -```python +```python showLineNumbers # !/usr/bin/env python from constructs import Construct @@ -458,7 +458,7 @@ app.synth() ``` -```javascript +```javascript showLineNumbers import { Construct } from "constructs"; import { App, TerraformStack } from "cdktf"; import { AwsProvider } from "@cdktf/provider-aws/lib/provider"; @@ -498,7 +498,7 @@ cdktf synth && cdktf deploy You should see the following output: -```sh +```sh showLineNumbers example Initializing the backend... example Successfully configured the backend "local"! Terraform will automatically @@ -543,7 +543,7 @@ Terragrunt is an open-source wrapper for Terraform that provides extra tools for A sample `terragrunt.hcl` configuration file to use with LocalStack is shown below: -```hcl +```hcl showLineNumbers generate "provider" { path = "provider.tf" if_exists = "overwrite_terragrunt" diff --git a/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md b/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md index fb5f94a7..50af5dd1 100644 --- a/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md +++ b/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md @@ -16,13 +16,13 @@ You can find the [example Docker Compose](docker-compose.yml) file which contain 1. Run Docker Compose: -```bash +```bash showLineNumbers docker-compose up -d ``` 2. Create the Lambda function: -```bash +```bash showLineNumbers awslocal lambda create-function \ --function-name fun1 \ --handler lambda.handler \ @@ -54,7 +54,7 @@ awslocal lambda create-function \ 3. 
Create an example secret: -```bash +```bash showLineNumbers awslocal secretsmanager create-secret --name localstack { "ARN": "arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI", @@ -65,14 +65,14 @@ awslocal secretsmanager create-secret --name localstack 4. Create an example Kafka topic: -```bash +```bash showLineNumbers docker exec -ti kafka kafka-topics --zookeeper zookeeper:2181 --create --replication-factor 1 --partitions 1 --topic t1 Created topic t1. ``` 5. Create the event source mapping to your local kafka cluster: -```bash +```bash showLineNumbers awslocal lambda create-event-source-mapping \ --topics t1 \ --source-access-configuration Type=SASL_SCRAM_512_AUTH,URI=arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI \ diff --git a/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md b/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md index b131ccff..d6a59013 100644 --- a/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md +++ b/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md @@ -28,7 +28,7 @@ To get started with HyperExecute, you need to fulfill the following prerequisite Create a new file named `he.yml` in the root directory of your project and add the following content: -```yaml +```yaml showLineNumbers version: "0.1" runson: linux autosplit: true @@ -58,7 +58,7 @@ Subsequently, you need to add your LocalStack Auth Token to your HyperExecute Po To enable test execution on HyperExecute, you need to add the following content to your GitHub Actions workflow file: -```yaml +```yaml showLineNumbers version: "0.1" runson: linux ... @@ -98,7 +98,7 @@ You can find your access key in the HyperExecute Portal. In this example, we will use GitHub Actions to run the tests in the CI pipeline. 
To do so, you need to add the following content to your GitHub Actions workflow file in `.github/workflows/main.yml`: -```yaml +```yaml showLineNumbers name: Running tests on HyperExecute on: diff --git a/src/content/docs/aws/integrations/testing/testcontainers.mdx b/src/content/docs/aws/integrations/testing/testcontainers.mdx index eac26ef5..8086d679 100644 --- a/src/content/docs/aws/integrations/testing/testcontainers.mdx +++ b/src/content/docs/aws/integrations/testing/testcontainers.mdx @@ -39,7 +39,7 @@ go get github.com/testcontainers/testcontainers-go/modules/localstack ``` -```java +```java showLineNumbers org.testcontainers localstack @@ -64,7 +64,7 @@ npm i @testcontainers/localstack -```csharp +```csharp showLineNumbers var localStackContainer = new LocalStackBuilder().Build(); await localStackContainer.StartAsync() @@ -92,7 +92,7 @@ const localstack = new LocalstackContainer("localstack/localstack:3").start() -```csharp +```csharp showLineNumbers var config = new AmazonS3Config(); config.ServiceURL = localStackContainer.GetConnectionString(); using var client = new AmazonS3Client(config); @@ -144,7 +144,7 @@ func s3Client(ctx context.Context, l *localstack.LocalStackContainer) (*s3.Clien ``` -```java +```java showLineNumbers S3Client s3 = S3Client.builder() .endpointOverride(localstack.getEndpoint()) .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(localstack.getAccessKey(), localstack.getSecretKey()))) @@ -153,7 +153,7 @@ S3Client s3 = S3Client.builder() ``` -```typescript +```typescript showLineNumbers const awsConfig = { endpoint: localstack.getConnectionUri(), credentials: { diff --git a/src/content/docs/aws/services/account.mdx b/src/content/docs/aws/services/account.mdx index 3d7d0b3c..e30d9569 100644 --- a/src/content/docs/aws/services/account.mdx +++ b/src/content/docs/aws/services/account.mdx @@ -33,7 +33,7 @@ We will demonstrate how to put contact information, fetch account details, and a You can use the [`PutContactInformation`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutContactInformation.html) API to add or update the contact information for your AWS account. Run the following command to add contact information to your account: -```bash +```bash showLineNumbers awslocal account put-contact-information \ --contact-information '{ "FullName": "Jane Doe", @@ -55,7 +55,7 @@ Run the following command to fetch the contact information for your account: awslocal account get-contact-information ``` -```bash title="Output" +```bash title="Output" showLineNumbers { "ContactInformation": { "AddressLine1": "XXXX Main St", @@ -74,7 +74,7 @@ awslocal account get-contact-information You can attach an alternate contact using [`PutAlternateContact`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutAlternateContact.html) API. Run the following command to attach an alternate contact to your account: -```bash +```bash showLineNumbers awslocal account put-alternate-contact \ --alternate-contact-type "BILLING" \ --email-address "bill@ing.com" \ diff --git a/src/content/docs/aws/services/acm.mdx b/src/content/docs/aws/services/acm.mdx index 7bdcb03c..b6d84c20 100644 --- a/src/content/docs/aws/services/acm.mdx +++ b/src/content/docs/aws/services/acm.mdx @@ -27,7 +27,7 @@ Start your LocalStack container using your preferred method, then use the [Reque Specify the domain name you want to request the certificate for, and any additional options you need. 
Here's an example command: -```bash +```bash showLineNumbers awslocal acm request-certificate \ --domain-name www.example.com \ --validation-method DNS \ diff --git a/src/content/docs/aws/services/apacheflink.mdx b/src/content/docs/aws/services/apacheflink.mdx index 020a93bb..bb272d08 100644 --- a/src/content/docs/aws/services/apacheflink.mdx +++ b/src/content/docs/aws/services/apacheflink.mdx @@ -80,7 +80,7 @@ Without the proper permissions policy and role, this example application will no Create an IAM role for the running MSF application to assume. -```json +```json showLineNumbers # role.json { "Version": "2012-10-17", @@ -100,7 +100,7 @@ awslocal iam create-role --role-name msaf-role --assume-role-policy-document fil Next create add a permissions policy to this role that permits read and write access to S3. -```json +```json showLineNumbers # policy.json { "Version": "2012-10-17", @@ -124,7 +124,7 @@ Now, when the running MSF application assumes this role, it will have the necess With all prerequisite resources in place, the Flink application can now be created and started. -```bash +```bash showLineNumbers awslocal kinesisanalyticsv2 create-application \ --application-name msaf-app \ --runtime-environment FLINK-1_20 \ @@ -171,7 +171,7 @@ There are following prerequisites for CloudWatch Logs integration: To add a logging option: -```bash +```bash showLineNumbers awslocal kinesisanalyticsv2 add-application-cloud-watch-logging-option \ --application-name msaf-app \ --cloud-watch-logging-option '{"LogStreamARN": "arn:aws:logs:us-east-1:000000000000:log-group:msaf-log-group:log-stream:msaf-log-stream"}' diff --git a/src/content/docs/aws/services/apigateway.mdx b/src/content/docs/aws/services/apigateway.mdx index b81d6b81..f114ca6c 100644 --- a/src/content/docs/aws/services/apigateway.mdx +++ b/src/content/docs/aws/services/apigateway.mdx @@ -30,7 +30,7 @@ The Lambda function will be invoked with a `GET` request and return a response w Create a new file named `lambda.js` with the following contents: -```javascript +```javascript showLineNumbers 'use strict' const apiHandler = (payload, context, callback) => { @@ -51,7 +51,7 @@ The above code defines a function named `apiHandler` that returns a response wit Zip the file and upload it to LocalStack using the `awslocal` CLI. Run the following command: -```bash +```bash showLineNumbers zip function.zip lambda.js awslocal lambda create-function \ --function-name apigw-lambda \ @@ -120,7 +120,7 @@ You'll need this ID for the next step. Create a new resource for the API using the [`CreateResource`](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateResource.html) API. Use the ID of the resource returned in the previous step as the parent ID: -```bash +```bash showLineNumbers awslocal apigateway create-resource \ --rest-api-id \ --parent-id \ @@ -144,7 +144,7 @@ You'll need this Resource ID for the next step. Add a `GET` method to the resource using the [`PutMethod`](https://docs.aws.amazon.com/apigateway/latest/api/API_PutMethod.html) API. Use the ID of the resource returned in the previous step as the Resource ID: -```bash +```bash showLineNumbers awslocal apigateway put-method \ --rest-api-id \ --resource-id \ @@ -166,7 +166,7 @@ awslocal apigateway put-method \ Now, create a new integration for the method using the [`PutIntegration`](https://docs.aws.amazon.com/apigateway/latest/api/API_PutIntegration.html) API. 
-```bash +```bash showLineNumbers awslocal apigateway put-integration \ --rest-api-id \ --resource-id \ @@ -256,7 +256,7 @@ LocalStack provides additional features and functionality on top of the official To demonstrate how to access APIs through LocalStack's local domain name, consider the following Serverless configuration that shows two Lambda functions (`serviceV1` and `serviceV2`) that are connected to an API Gateway v1 (`http` event) and an API Gateway v2 endpoint (`httpApi` event), respectively: -```yaml +```yaml showLineNumbers ... plugins: - serverless-localstack @@ -332,7 +332,7 @@ http://localhost:4566/restapis///_user_request_/ WebSocket APIs provide real-time communication channels between a client and a server. To use WebSockets in LocalStack, you can define a WebSocket route in your Serverless configuration: -```yaml +```yaml showLineNumbers ... plugins: - serverless-localstack @@ -403,7 +403,7 @@ awslocal apigateway create-rest-api --name my-api --tags '{"_custom_id_":"myid12 You can also configure the protocol type, the possible values being `HTTP` and `WEBSOCKET`: -```bash +```bash showLineNumbers awslocal apigatewayv2 create-api \ --name=my-api \ --protocol-type=HTTP --tags="_custom_id_=my-api" diff --git a/src/content/docs/aws/services/appautoscaling.mdx b/src/content/docs/aws/services/appautoscaling.mdx index 6e740359..8ba59cb2 100644 --- a/src/content/docs/aws/services/appautoscaling.mdx +++ b/src/content/docs/aws/services/appautoscaling.mdx @@ -28,7 +28,7 @@ We will demonstrate how you can configure auto scaling to handle a heavy workloa To create a new Lambda function, create a new file called `index.js` with the following code: -```js +```js showLineNumbers exports.handler = async (event, context) => { console.log('Hello from Lambda!'); return { diff --git a/src/content/docs/aws/services/appconfig.mdx b/src/content/docs/aws/services/appconfig.mdx index cc69e1ae..26644b3a 100644 --- a/src/content/docs/aws/services/appconfig.mdx +++ b/src/content/docs/aws/services/appconfig.mdx @@ -95,7 +95,7 @@ The following output would be retrieved: You can now create a JSON file to add your feature flag configuration data. Create a file named `feature-flag-config.json` with the following content: -```json +```json showLineNumbers { "allow_mobile_payments": { "enabled": false diff --git a/src/content/docs/aws/services/appsync.mdx b/src/content/docs/aws/services/appsync.mdx index 4462afb6..dadbfc57 100644 --- a/src/content/docs/aws/services/appsync.mdx +++ b/src/content/docs/aws/services/appsync.mdx @@ -98,7 +98,7 @@ awslocal appsync create-api-key \ Create a file named `schema.graphql` with the following content: -```graphql +```graphql showLineNumbers type Note { NoteId: ID! title: String diff --git a/src/content/docs/aws/services/athena.mdx b/src/content/docs/aws/services/athena.mdx index d782d5f2..f55a1104 100644 --- a/src/content/docs/aws/services/athena.mdx +++ b/src/content/docs/aws/services/athena.mdx @@ -235,7 +235,7 @@ print(cursor.fetchall()) -```python +```python showLineNumbers import awswrangler as wr import pandas as pd diff --git a/src/content/docs/aws/services/backup.mdx b/src/content/docs/aws/services/backup.mdx index f6261caa..7357533a 100644 --- a/src/content/docs/aws/services/backup.mdx +++ b/src/content/docs/aws/services/backup.mdx @@ -47,7 +47,7 @@ awslocal backup create-backup-vault \ You can create a backup plan which specifies the backup vault to store the backups in and the schedule for creating backups. 
You can specify the backup plan in a `backup-plan.json` file: -```json +```json showLineNumbers { "BackupPlanName": "testplan", "Rules": [{ @@ -91,7 +91,7 @@ awslocal backup create-backup-plan \ You can create a backup selection which specifies the resources to backup and the backup plan to associate with. You can specify the backup selection in a `backup-selection.json` file: -```json +```json showLineNumbers { "SelectionName": "Myselection", "IamRoleArn": "arn:aws:iam::000000000000:role/service-role/AWSBackupDefaultServiceRole", diff --git a/src/content/docs/aws/services/cloudformation.mdx b/src/content/docs/aws/services/cloudformation.mdx index 2d4866ff..a62755da 100644 --- a/src/content/docs/aws/services/cloudformation.mdx +++ b/src/content/docs/aws/services/cloudformation.mdx @@ -34,7 +34,7 @@ Use the following code snippet and save the content in either `cfn-quickstart-st -```yaml +```yaml showLineNumbers Resources: LocalBucket: Type: AWS::S3::Bucket @@ -43,7 +43,7 @@ Resources: ``` -```json +```json showLineNumbers { "Resources": { "LocalBucket": { From 2dcdf92affe530d047f91cbc6fa0dbcbd0c04836 Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Mon, 23 Jun 2025 22:31:55 -0700 Subject: [PATCH 2/8] more aws services done --- src/content/docs/aws/services/codebuild.mdx | 14 +++++++------- src/content/docs/aws/services/codepipeline.mdx | 6 +++--- src/content/docs/aws/services/cognito.mdx | 6 +++--- src/content/docs/aws/services/docdb.mdx | 4 ++-- src/content/docs/aws/services/dynamodbstreams.mdx | 4 ++-- src/content/docs/aws/services/ecs.mdx | 6 +++--- src/content/docs/aws/services/eks.mdx | 6 +++--- .../docs/aws/services/elementalmediaconvert.mdx | 2 +- src/content/docs/aws/services/es.mdx | 2 +- src/content/docs/aws/services/events.mdx | 2 +- src/content/docs/aws/services/fis.mdx | 2 +- src/content/docs/aws/services/glue.mdx | 2 +- src/content/docs/aws/services/iot.mdx | 4 ++-- src/content/docs/aws/services/iotanalytics.mdx | 2 +- src/content/docs/aws/services/iotwireless.mdx | 2 +- src/content/docs/aws/services/kinesis.mdx | 2 +- src/content/docs/aws/services/lakeformation.mdx | 2 +- src/content/docs/aws/services/lambda.mdx | 2 +- .../docs/aws/services/managedblockchain.mdx | 4 ++-- src/content/docs/aws/services/neptune.mdx | 4 ++-- 20 files changed, 39 insertions(+), 39 deletions(-) diff --git a/src/content/docs/aws/services/codebuild.mdx b/src/content/docs/aws/services/codebuild.mdx index 56f1e103..28ab21fa 100644 --- a/src/content/docs/aws/services/codebuild.mdx +++ b/src/content/docs/aws/services/codebuild.mdx @@ -47,7 +47,7 @@ Let us walk through these files. It does nothing more than print a salutation message. Create a `MessageUtil.java` file and save it into the `src/main/java` directory. -```java +```java showLineNumbers public class MessageUtil { private String message; @@ -71,7 +71,7 @@ public class MessageUtil { Every build needs to be tested. Therefore, create the `TestMessageUtil.java` file in the `src/test/java` directory. -```java +```java showLineNumbers import org.junit.Test; import org.junit.Ignore; import static org.junit.Assert.assertEquals; @@ -101,7 +101,7 @@ This small suite simply verifies that the greeting message is built correctly. Finally, we need a `pom.xml` file to instruct Maven about what to build and which artifact needs to be produced. Create this file at the root of your directory. 
-```xml
+```xml showLineNumbers



@@ -140,7 +140,7 @@ A `buildspec` file is a collection of settings and commands, specified in YAML f
 Create this `buildspec.yml` file in the root directory.
 
-```yaml
+```yaml showLineNumbers
 version: 0.2
 
 phases:
@@ -200,7 +200,7 @@ awslocal s3 cp MessageUtil.zip s3://codebuild-demo-input
 To work properly, AWS CodeBuild needs access to other AWS services, e.g., to retrieve the source code from an S3 bucket.
 Create a `create-role.json` file with the following content:
 
-```json
+```json showLineNumbers
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -227,7 +227,7 @@ it will be needed to create the CodeBuild project later on.
 Let us now define a policy for the created role.
 Create a `put-role-policy.json` file with the following content:
 
-```json
+```json showLineNumbers
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -302,7 +302,7 @@ awslocal codebuild create-project --generate-cli-skeleton
 From the generated file, change the source and the artifact location to match the S3 bucket names you just created.
 Similarly, fill in the ARN of the CodeBuild service role.
 
-```json {hl_lines=[5,9,16]}
+```json {hl_lines=[5,9,16]} showLineNumbers
 {
   "name": "codebuild-demo-project",
   "source": {
diff --git a/src/content/docs/aws/services/codepipeline.mdx b/src/content/docs/aws/services/codepipeline.mdx
index 4dc362a8..78b290c2 100644
--- a/src/content/docs/aws/services/codepipeline.mdx
+++ b/src/content/docs/aws/services/codepipeline.mdx
@@ -61,7 +61,7 @@ This requires a properly configured IAM role that our pipeline can assume.
 Create the role and make note of the role ARN:
 
-```json
+```json showLineNumbers
 # role.json
 {
   "Version": "2012-10-17",
@@ -85,7 +85,7 @@ awslocal iam create-role --role-name role --assume-role-policy-document file://r
 Now add a permissions policy to this role that permits read and write access to S3.
 
-```json
+```json showLineNumbers
 # policy.json
 {
   "Version": "2012-10-17",
@@ -121,7 +121,7 @@ This is a deploy action which uploads the file to the target bucket.
 Pay special attention to `roleArn`, `artifactStore.location` as well as `S3Bucket`, `S3ObjectKey`, and `BucketName`.
 These correspond to the resources we created earlier.
 
-```json {hl_lines=[6,9,26,27,52]}
+```json {hl_lines=[6,9,26,27,52]} showLineNumbers
 # declaration.json
 {
   "name": "pipeline",
diff --git a/src/content/docs/aws/services/cognito.mdx b/src/content/docs/aws/services/cognito.mdx
index 4c2fcb58..4689c25f 100644
--- a/src/content/docs/aws/services/cognito.mdx
+++ b/src/content/docs/aws/services/cognito.mdx
@@ -237,7 +237,7 @@ Cognito offers a variety of lifecycle hooks called Cognito Lambda triggers, whic
 To illustrate, suppose you wish to define a _user migration_ Lambda trigger in order to migrate users from your existing user directory into Amazon Cognito user pools at sign-in.
 In this case, you can start by creating a Lambda function, let's say named `"migrate_users"`, responsible for performing the migration by creating a new file `index.js` with the following code:
 
-```javascript
+```javascript showLineNumbers
 const validUsers = {
   belladonna: { password: "12345678Aa!", emailAddress: "bella@example.com" },
 };
@@ -379,7 +379,7 @@ awslocal cognito-idp create-resource-server \
 You can retrieve the token from your application using the specified endpoint:
 `http://cognito-idp.localhost.localstack.cloud:4566/_aws/cognito-idp/oauth2/token`.
-```javascript +```javascript showLineNumbers require('dotenv').config(); const axios = require('axios'); @@ -419,7 +419,7 @@ Furthermore, you have the option to combine Cognito and LocalStack seamlessly wi For instance, consider this snippet from a `serverless.yml` configuration: -```yaml +```yaml showLineNumbers service: test plugins: diff --git a/src/content/docs/aws/services/docdb.mdx b/src/content/docs/aws/services/docdb.mdx index ec2fb46d..1ddd9433 100644 --- a/src/content/docs/aws/services/docdb.mdx +++ b/src/content/docs/aws/services/docdb.mdx @@ -249,7 +249,7 @@ npm install mongodb@6.3.0 Next, copy the following code into a new file named `index.js` in the `resources` folder: -```javascript +```javascript showLineNumbers const AWS = require('aws-sdk'); const RDS = AWS.RDS; const { MongoClient } = require('mongodb'); @@ -340,7 +340,7 @@ Secrets follow a [well-defined pattern](https://docs.aws.amazon.com/secretsmanag For the lambda function, you can pass the secret arn as `SECRET_NAME`. In the lambda, you can then retrieve the secret details like this: -```javascript +```javascript showLineNumbers const AWS = require('aws-sdk'); const { MongoClient } = require('mongodb'); diff --git a/src/content/docs/aws/services/dynamodbstreams.mdx b/src/content/docs/aws/services/dynamodbstreams.mdx index b90d927b..630f98e8 100644 --- a/src/content/docs/aws/services/dynamodbstreams.mdx +++ b/src/content/docs/aws/services/dynamodbstreams.mdx @@ -55,7 +55,7 @@ You can notice that in the `LatestStreamArn` field of the response: You can now create a Lambda function (`publishNewBark`) to process stream records from `BarkTable`. Create a new file named `index.js` with the following code: -```javascript +```javascript showLineNumbers 'use strict'; var AWS = require("aws-sdk"); @@ -98,7 +98,7 @@ awslocal lambda create-function \ To test the Lambda function, you can invoke it using the [`Invoke`](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) API. Create a new file named `payload.json` with the following content: -```json +```json showLineNumbers { "Records": [ { diff --git a/src/content/docs/aws/services/ecs.mdx b/src/content/docs/aws/services/ecs.mdx index 9ec1e7ee..d5a83fd9 100644 --- a/src/content/docs/aws/services/ecs.mdx +++ b/src/content/docs/aws/services/ecs.mdx @@ -61,7 +61,7 @@ awslocal ecs create-cluster --cluster-name mycluster Containers within tasks are defined by a task definition that is managed outside of the context of a cluster. To create a task definition that runs an `ubuntu` container forever (by running an infinite loop printing "Running" on startup), create the following file as `task_definition.json`: -```json +```json showLineNumbers { "containerDefinitions": [ { @@ -296,7 +296,7 @@ ecs_client.register_task_definition( The same functionality can be achieved with the AWS CDK following this (Python) example: -```python +```python showLineNumbers task_definition = ecs.TaskDefinition( ... 
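    # NOTE: `...` stands for the remaining TaskDefinition arguments (family,
    # compatibility, container definitions, and so on), which are no different
    # from a regular CDK app; only the volume declared next, backed by a
    # bind-mounted host path, is particular to this LocalStack example.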
volumes=[ @@ -322,7 +322,7 @@ Your file paths might differ, so check Docker's documentation on [Environment Va Here is a Docker Compose example: -```yaml +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/services/eks.mdx b/src/content/docs/aws/services/eks.mdx index b409824c..cb119fe6 100644 --- a/src/content/docs/aws/services/eks.mdx +++ b/src/content/docs/aws/services/eks.mdx @@ -307,7 +307,7 @@ To enable HTTPS for your endpoints, you can configure Kubernetes to use SSL/TLS The local EKS cluster comes pre-configured with a secret named `ls-secret-tls`, which can be conveniently utilized to define the `tls` section in the ingress configuration: -```yaml +```yaml showLineNumbers apiVersion: networking.k8s.io/v1 kind: Ingress metadata: @@ -425,7 +425,7 @@ In such cases, path-based routing may not be ideal if you need the services to b To address this requirement, we recommend utilizing host-based routing rules, as demonstrated in the example below: -```bash +```bash showLineNumbers cat < { diff --git a/src/content/docs/aws/services/fis.mdx b/src/content/docs/aws/services/fis.mdx index 1491c54b..04188559 100644 --- a/src/content/docs/aws/services/fis.mdx +++ b/src/content/docs/aws/services/fis.mdx @@ -53,7 +53,7 @@ We will demonstrate how to create an experiment that stops EC2 instances. Create a new file named `create-experiment.json`. This file should contain a JSON configuration that will be utilized during the subsequent invocation of the [`CreateExperimentTemplate`](https://docs.aws.amazon.com/fis/latest/APIReference/API_CreateExperimentTemplate.html) API. -```json +```json showLineNumbers { "actions": { "StopInstance": { diff --git a/src/content/docs/aws/services/glue.mdx b/src/content/docs/aws/services/glue.mdx index c6943866..8eead869 100644 --- a/src/content/docs/aws/services/glue.mdx +++ b/src/content/docs/aws/services/glue.mdx @@ -67,7 +67,7 @@ awslocal glue get-tables --database db1 Create a new PySpark script named `job.py` with the following code: -```python +```python showLineNumbers from pyspark.sql import SparkSession def init_spark(): diff --git a/src/content/docs/aws/services/iot.mdx b/src/content/docs/aws/services/iot.mdx index 47589106..22658a5b 100644 --- a/src/content/docs/aws/services/iot.mdx +++ b/src/content/docs/aws/services/iot.mdx @@ -79,7 +79,7 @@ AWS provides its root CA certificate at [`https://www.amazontrust.com/repository When connecting to the endpoints, you will need to provide this root CA certificate for authentication. This is illustrated below with Python [AWS IoT SDK](https://docs.aws.amazon.com/iot/latest/developerguide/iot-sdks.html), -```py +```py showLineNumbers import awscrt import boto3 from awsiot import mqtt_connection_builder @@ -127,7 +127,7 @@ For details on how ALPN works with AWS, see [this page](https://docs.aws.amazon. The client certificate and key can be retrieved using `CreateKeysAndCertificate` operation. The certificate is signed by the LocalStack root CA. 
-```py +```py showLineNumbers result = iot_client.create_keys_and_certificate(setAsActive=True) # Path to file with saved content `result["certificatePem"]` diff --git a/src/content/docs/aws/services/iotanalytics.mdx b/src/content/docs/aws/services/iotanalytics.mdx index deb54912..33459454 100644 --- a/src/content/docs/aws/services/iotanalytics.mdx +++ b/src/content/docs/aws/services/iotanalytics.mdx @@ -85,7 +85,7 @@ awslocal iotanalytics create-pipeline --cli-input-json file://mypipeline.json The `mypipeline.json` file contains the following content: -```json +```json showLineNumbers { "pipelineName": "mypipeline", "pipelineActivities": [ diff --git a/src/content/docs/aws/services/iotwireless.mdx b/src/content/docs/aws/services/iotwireless.mdx index d89e167d..d972f512 100644 --- a/src/content/docs/aws/services/iotwireless.mdx +++ b/src/content/docs/aws/services/iotwireless.mdx @@ -69,7 +69,7 @@ awslocal iotwireless create-wireless-device \ The `input.json` file contains the following content: -```json title="input.json" +```json title="input.json" showLineNumbers { "Description": "My LoRaWAN wireless device", "DestinationName": "IoTWirelessDestination", diff --git a/src/content/docs/aws/services/kinesis.mdx b/src/content/docs/aws/services/kinesis.mdx index c1c7ea2e..10eed089 100644 --- a/src/content/docs/aws/services/kinesis.mdx +++ b/src/content/docs/aws/services/kinesis.mdx @@ -29,7 +29,7 @@ We will demonstrate how to create a Lambda function to consume events from a Kin You need to create a Lambda function that receives a Kinesis event input and processes the messages that it contains. Create a file named `index.mjs` with the following content: -```javascript +```javascript showLineNumbers console.log('Loading function'); export const handler = (event, context) => { diff --git a/src/content/docs/aws/services/lakeformation.mdx b/src/content/docs/aws/services/lakeformation.mdx index ead058c7..753a7d8f 100644 --- a/src/content/docs/aws/services/lakeformation.mdx +++ b/src/content/docs/aws/services/lakeformation.mdx @@ -71,7 +71,7 @@ awslocal lakeformation list-resources You can grant permissions to a user or group using the [`GrantPermissions`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_GrantPermissions.html) API. Create a file named `permissions.json` with the following content: -```json +```json showLineNumbers { "CatalogId": "000000000000", "Principal": { diff --git a/src/content/docs/aws/services/lambda.mdx b/src/content/docs/aws/services/lambda.mdx index b2760b91..767c8deb 100644 --- a/src/content/docs/aws/services/lambda.mdx +++ b/src/content/docs/aws/services/lambda.mdx @@ -30,7 +30,7 @@ With the Function URL property, you can call a Lambda Function via an HTTP API c To create a new Lambda function, create a new file called `index.js` with the following code: -```javascript +```javascript showLineNumbers exports.handler = async (event) => { let body = JSON.parse(event.body) const product = body.num1 * body.num2; diff --git a/src/content/docs/aws/services/managedblockchain.mdx b/src/content/docs/aws/services/managedblockchain.mdx index b9858530..f77ef014 100644 --- a/src/content/docs/aws/services/managedblockchain.mdx +++ b/src/content/docs/aws/services/managedblockchain.mdx @@ -26,7 +26,7 @@ We will demonstrate how to create a blockchain network, a node, and a proposal. You can create a blockchain network using the [`CreateNetwork`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNetwork.html) API. 
Run the following command to create a network named `OurBlockchainNet` which uses the Hyperledger Fabric with the following configuration: -```bash +```bash showLineNumbers awslocal managedblockchain create-network \ --cli-input-json '{ "Name": "OurBlockchainNet", @@ -81,7 +81,7 @@ Copy the `NetworkId` and `MemberId` values from the output of the above command, You can create a node using the [`CreateNode`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNode.html) API. Run the following command to create a node with the following configuration: -```bash +```bash showLineNumbers awslocal managedblockchain create-node \ --node-configuration '{ "InstanceType": "bc.t3.small", diff --git a/src/content/docs/aws/services/neptune.mdx b/src/content/docs/aws/services/neptune.mdx index dc272739..02556448 100644 --- a/src/content/docs/aws/services/neptune.mdx +++ b/src/content/docs/aws/services/neptune.mdx @@ -91,7 +91,7 @@ To start a connection you have to use the `ws` protocol. Here is an example that uses Python and [`gremlinpython`](https://pypi.org/project/gremlinpython/) to connect to the database: -```python +```python showLineNumbers from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection from gremlin_python.process.anonymous_traversal import traversal from gremlin_python.process.traversal import Bindings, T, gt @@ -279,7 +279,7 @@ This feature is in beta and any feedback is appreciated. Here is an example of how to use the `GraphSONSerializersV3d0` serializer with `gremlinpython==3.6.2`: -```python +```python showLineNumbers from gremlin_python.driver import serializer from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection from gremlin_python.process.anonymous_traversal import traversal From e27fcfcf3bf9a4d58ba0c05bb492c36653e04910 Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Mon, 23 Jun 2025 22:43:52 -0700 Subject: [PATCH 3/8] more aws services done --- src/content/docs/aws/services/opensearch.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/content/docs/aws/services/opensearch.mdx b/src/content/docs/aws/services/opensearch.mdx index a7f9c565..7992f962 100644 --- a/src/content/docs/aws/services/opensearch.mdx +++ b/src/content/docs/aws/services/opensearch.mdx @@ -171,7 +171,7 @@ IAM support is also not yet available. A secure OpenSearch domain can be spawned with this example CLI input. Save it in a file named `opensearch_domain.json`. -```json title="opensearch_domain.json" +```json title="opensearch_domain.json" showLineNumbers { "DomainName": "secure-domain", "ClusterConfig": { @@ -284,7 +284,7 @@ It's important to bear in mind that only a single backend configuration is possi Here is a sample `docker-compose.yaml` file that contains a single-node OpenSearch cluster and a basic LocalStack setup. 
-```yaml +```yaml showLineNumbers services: opensearch: container_name: opensearch From 01991ee3066614f93033ae4a032a822858b9fa59 Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Tue, 24 Jun 2025 14:17:43 -0700 Subject: [PATCH 4/8] finished aws docs --- src/content/docs/aws/services/rds.mdx | 4 +-- src/content/docs/aws/services/s3.mdx | 6 ++-- .../docs/aws/services/servicediscovery.mdx | 4 +-- src/content/docs/aws/services/sqs.mdx | 6 ++-- .../docs/aws/services/stepfunctions.mdx | 18 +++++------ src/content/docs/aws/services/swf.mdx | 8 ++--- src/content/docs/aws/services/transfer.mdx | 2 +- .../docs/aws/services/verifiedpermissions.mdx | 2 +- src/content/docs/aws/services/waf.mdx | 2 +- src/content/docs/aws/services/xray.mdx | 4 +-- .../docs/aws/tooling/aws-replicator.mdx | 18 +++++------ .../extensions/developing-extensions.mdx | 8 ++--- .../extensions/managing-extensions.mdx | 4 +-- .../tooling/lambda-tools/hot-reloading.mdx | 12 ++++---- .../tooling/lambda-tools/remote-debugging.mdx | 30 +++++++++---------- .../aws/tooling/localstack-sdks/java-sdk.md | 6 ++-- .../aws/tooling/localstack-sdks/python-sdk.md | 12 ++++---- src/content/docs/aws/tooling/testing-utils.md | 2 +- .../docs/aws/tutorials/elb-load-balancing.mdx | 12 ++++---- .../tutorials/s3-static-website-terraform.mdx | 24 +++++++-------- 20 files changed, 92 insertions(+), 92 deletions(-) diff --git a/src/content/docs/aws/services/rds.mdx b/src/content/docs/aws/services/rds.mdx index e40e9ff4..a917c0fa 100644 --- a/src/content/docs/aws/services/rds.mdx +++ b/src/content/docs/aws/services/rds.mdx @@ -329,7 +329,7 @@ The [`aws_s3` extension](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/ In the SQL code snippet below, we are loading the `aws_s3` extension, then use the `table_import_from_s3(..)` function to populate the data in a table `table1` from a CSV file `test.csv` stored in a local S3 bucket `mybucket1`: -```sql +```sql lineNumbers CREATE EXTENSION IF NOT EXISTS aws_s3 CASCADE; SELECT aws_s3.table_import_from_s3( 'table1', 'c1, c2, c3', '(format csv)', @@ -339,7 +339,7 @@ SELECT aws_s3.table_import_from_s3( Analogously, we can use the `query_export_to_s3(..)` extension function to export data from a table `table2` into a CSV file `test.csv` in local S3 bucket `mybucket2`: -```sql +```sql lineNumbers CREATE EXTENSION IF NOT EXISTS aws_s3 CASCADE; SELECT aws_s3.query_export_to_s3( 'SELECT * FROM table2', diff --git a/src/content/docs/aws/services/s3.mdx b/src/content/docs/aws/services/s3.mdx index 1343276b..2715d0ee 100644 --- a/src/content/docs/aws/services/s3.mdx +++ b/src/content/docs/aws/services/s3.mdx @@ -181,7 +181,7 @@ awslocal s3api create-bucket --bucket cors-bucket Next, create a JSON file with the CORS configuration. The file should have the following format: -```json title="cors-config.json" +```json title="cors-config.json" lineNumbers { "CORSRules": [ { @@ -217,7 +217,7 @@ Your S3 bucket is configured to allow cross-origin resource sharing, and if you However, if you try to access your bucket from [LocalStack Web Application](https://app.localstack.cloud), you'll see errors, and your bucket won't be accessible anymore. 
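Before changing anything, you can reproduce the failure with a manual CORS preflight request.
The snippet below is a minimal sketch: it reuses the `cors-bucket` bucket created above and assumes the default LocalStack endpoint; if the request origin is not covered by the bucket's CORS rules, the response will not carry the expected `Access-Control-Allow-Origin` header.

```bash
# simulate the preflight request a browser sends on behalf of the web app
curl -i -X OPTIONS \
  -H "Origin: https://app.localstack.cloud" \
  -H "Access-Control-Request-Method: PUT" \
  "http://s3.localhost.localstack.cloud:4566/cors-bucket"
```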
We can edit the JSON file `cors-config.json` you created earlier with the following configuration and save it: -```json title="cors-config.json" +```json title="cors-config.json" lineNumbers { "CORSRules": [ { @@ -263,7 +263,7 @@ IMAGE_NAME=localstack/localstack:s3-latest localstack start ``` -```yaml +```yaml lineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/services/servicediscovery.mdx b/src/content/docs/aws/services/servicediscovery.mdx index a8646c2e..d2cdcb58 100644 --- a/src/content/docs/aws/services/servicediscovery.mdx +++ b/src/content/docs/aws/services/servicediscovery.mdx @@ -82,7 +82,7 @@ awslocal ecs create-cluster \ Next, you will register a task definition that's compatible with Fargate. Create a file named `fargate-task.json` and add the following content: -```json title="fargate-task.json" +```json title="fargate-task.json" lineNumbers { "family": "tutorial-task-def", "networkMode": "awsvpc", @@ -157,7 +157,7 @@ Make a note of the `GroupId` and `SubnetId` values. Create a new file named `ecs-service-discovery.json` and add the following content to it: -```json title="ecs-service-discovery.json" +```json title="ecs-service-discovery.json" lineNumbers { "cluster": "tutorial", "serviceName": "ecs-service-discovery", diff --git a/src/content/docs/aws/services/sqs.mdx b/src/content/docs/aws/services/sqs.mdx index 8af29e2e..75ee5348 100644 --- a/src/content/docs/aws/services/sqs.mdx +++ b/src/content/docs/aws/services/sqs.mdx @@ -408,7 +408,7 @@ curl "http://localhost.localstack.cloud:4566/_aws/sqs/messages?QueueUrl=http://s ``` -```python +```python lineNumbers import requests response = requests.get( @@ -577,7 +577,7 @@ aws --endpoint-url=http://localhost.localstack.cloud:4566/_aws/sqs/messages sqs ``` -```python +```python lineNumbers import boto3 sqs = boto3.client("sqs", endpoint_url="http://localhost.localstack.cloud:4566/_aws/sqs/messages") response = sqs.receive_message(QueueUrl="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/my-queue") @@ -631,7 +631,7 @@ curl -H "Accept: application/json" \ ``` -```python +```python lineNumbers import requests response = requests.get( "http://localhost.localstack.cloud:4566/_aws/sqs/messages", diff --git a/src/content/docs/aws/services/stepfunctions.mdx b/src/content/docs/aws/services/stepfunctions.mdx index dbf2b155..39426eef 100644 --- a/src/content/docs/aws/services/stepfunctions.mdx +++ b/src/content/docs/aws/services/stepfunctions.mdx @@ -28,7 +28,7 @@ You can create a state machine using the [`CreateStateMachine`](https://docs.aws The API requires the name of the state machine, the state machine definition, and the role ARN that the state machine will assume to call AWS services. Run the following command to create a state machine: -```bash +```bash lineNumbers awslocal stepfunctions create-state-machine \ --name "CreateAndListBuckets" \ --definition '{ @@ -167,7 +167,7 @@ The first step is to select the state machine where mocked responses should be a In this example, we'll use a state machine named `LambdaSQSIntegration`, defined as follows: -```json title="LambdaSQSIntegration.json" +```json title="LambdaSQSIntegration.json" lineNumbers { "Comment": "This state machine is called: LambdaSQSIntegration", "QueryLanguage": "JSONata", @@ -223,7 +223,7 @@ This section specifies the Step Functions state machines to mock, along with the Each test case maps state names to `ResponseID`s defined in the `MockedResponses` section. 
-```json +```json lineNumbers "StateMachines": { "": { "TestCases": { @@ -247,7 +247,7 @@ At runtime, if a test case is selected, the state uses the mocked response (if d Below is a complete example of the `StateMachines` section: -```json +```json lineNumbers "LambdaSQSIntegration": { "TestCases": { "LambdaRetryCase": { @@ -264,7 +264,7 @@ This section defines mocked responses for Task states. Each `ResponseID` includes one or more step keys and defines either a `Return` value or a `Throw` error. -```json +```json lineNumbers "MockedResponses": { "": { "": { "Return": ... }, @@ -287,7 +287,7 @@ Each entry must have **either** `Return` or `Throw`, but cannot have both. Here is a complete example of the `MockedResponses` section: -```json +```json lineNumbers "MockedLambdaStateRetry": { "0": { "Throw": { @@ -314,7 +314,7 @@ Here is a complete example of the `MockedResponses` section: The `MockConfigFile.json` below is used to test the `LambdaSQSIntegration` state machine defined earlier. -```json +```json lineNumbers { "StateMachines":{ "LambdaSQSIntegration":{ @@ -394,7 +394,7 @@ localstack start --volume /path/to/MockConfigFile.json:/tmp/MockConfigFile.json ``` -```yaml +```yaml lineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -452,7 +452,7 @@ awslocal stepfunctions describe-execution \ --execution-arn "arn:aws:states:us-east-1:000000000000:execution:LambdaSQSIntegration:MockExecutionBaseCase" ``` -```json +```json lineNumbers { "executionArn": "arn:aws:states:us-east-1:000000000000:execution:LambdaSQSIntegration:MockExecutionBaseCase", "stateMachineArn": "arn:aws:states:us-east-1:000000000000:stateMachine:LambdaSQSIntegration", diff --git a/src/content/docs/aws/services/swf.mdx b/src/content/docs/aws/services/swf.mdx index 6eaaca0d..e0802e5a 100644 --- a/src/content/docs/aws/services/swf.mdx +++ b/src/content/docs/aws/services/swf.mdx @@ -82,7 +82,7 @@ awslocal swf list-domains --registration-status DEPRECATED You can register a workflow using the [`RegisterWorkflowType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterWorkflowType.html) API. Execute the following command to register a workflow named `test-workflow`: -```bash +```bash lineNumbers awslocal swf register-workflow-type \ --domain test-domain \ --name test-workflow \ @@ -130,7 +130,7 @@ The following output would be retrieved: You can register an activity using the [`RegisterActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterActivityType.html) API. Execute the following command to register an activity named `test-activity`: -```bash +```bash lineNumbers awslocal swf register-activity-type \ --domain test-domain \ --name test-activity \ @@ -145,7 +145,7 @@ awslocal swf register-activity-type \ You can use the [`DescribeActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeActivityType.html) API to verify that the activity was registered successfully. Run the following command to describe the `test-activity` activity: -```bash +```bash lineNumbers awslocal swf describe-activity-type \ --domain test-domain \ --activity-type name=test-activity,version=1.0 @@ -180,7 +180,7 @@ The following output would be retrieved: You can start a workflow execution using the [`StartWorkflowExecution`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_StartWorkflowExecution.html) API. 
Execute the following command to start a workflow execution for the `test-workflow` workflow: -```bash +```bash lineNumbers awslocal swf start-workflow-execution \ --domain test-domain \ --workflow-type name=test-workflow,version=1.0 \ diff --git a/src/content/docs/aws/services/transfer.mdx b/src/content/docs/aws/services/transfer.mdx index 6574c0eb..d8d21d32 100644 --- a/src/content/docs/aws/services/transfer.mdx +++ b/src/content/docs/aws/services/transfer.mdx @@ -17,7 +17,7 @@ Whether you're looking to facilitate file transfers or enhance your data access This Python code demonstrates a basic workflow for transferring a file between a local machine and AWS S3 using the AWS Transfer Family service and FTP (File Transfer Protocol). -```python +```python lineNumbers import io import time import uuid diff --git a/src/content/docs/aws/services/verifiedpermissions.mdx b/src/content/docs/aws/services/verifiedpermissions.mdx index f786dc6f..75f64e12 100644 --- a/src/content/docs/aws/services/verifiedpermissions.mdx +++ b/src/content/docs/aws/services/verifiedpermissions.mdx @@ -55,7 +55,7 @@ To create a Verified Permissions Policy, use the [`CreatePolicy`](https://docs.a Create a JSON file named `static_policy.json` with the following content: -```json +```json lineNumbers { "static": { "description": "Grant the User alice access to view the trip Album", diff --git a/src/content/docs/aws/services/waf.mdx b/src/content/docs/aws/services/waf.mdx index af081bf6..871a3b83 100644 --- a/src/content/docs/aws/services/waf.mdx +++ b/src/content/docs/aws/services/waf.mdx @@ -26,7 +26,7 @@ We will walk you through creating, listing, tagging, and viewing tags for Web Ac Start by creating a Web Access Control List (WebACL) using the [`CreateWebACL`](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateWebACL.html) API. Run the following command to create a WebACL named `TestWebAcl`: -```bash +```bash lineNumbers awslocal wafv2 create-web-acl \ --name TestWebAcl \ --scope REGIONAL \ diff --git a/src/content/docs/aws/services/xray.mdx b/src/content/docs/aws/services/xray.mdx index b9543570..2c6e7134 100644 --- a/src/content/docs/aws/services/xray.mdx +++ b/src/content/docs/aws/services/xray.mdx @@ -42,7 +42,7 @@ You can generates a unique trace ID and constructs a JSON document with trace in It then sends this trace segment to the AWS X-Ray API using the [PutTraceSegments](https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html) API. Run the following commands in your terminal: -```bash +```bash lineNumbers START_TIME=$(date +%s) HEX_TIME=$(printf '%x\n' $START_TIME) GUID=$(dd if=/dev/random bs=12 count=1 2>/dev/null | od -An -tx1 | tr -d ' \t\n') @@ -68,7 +68,7 @@ Sending trace segment to X-Ray API: {"trace_id": "1-6501ee11-056ec85fafff21f648e You can now retrieve the trace summaries from the last 10 minutes using the [GetTraceSummaries](https://docs.aws.amazon.com/xray/latest/api/API_GetTraceSummaries.html) API. 
Run the following commands in your terminal: -```bash +```bash lineNumbers EPOCH=$(date +%s) awslocal xray get-trace-summaries --start-time $(($EPOCH-600)) --end-time $(($EPOCH)) { diff --git a/src/content/docs/aws/tooling/aws-replicator.mdx b/src/content/docs/aws/tooling/aws-replicator.mdx index c8aa59ff..a2443bf4 100644 --- a/src/content/docs/aws/tooling/aws-replicator.mdx +++ b/src/content/docs/aws/tooling/aws-replicator.mdx @@ -77,7 +77,7 @@ Follow the [installation instructions](/aws/getting-started/installation/#instal To start a replication job, get the ARN of the resource to replicate. Then, trigger the job using the command: -```bash +```bash lineNumbers export LOCALSTACK_AUTH_TOKEN= export AWS_DEFAULT_REGION=... # if required @@ -101,7 +101,7 @@ localstack replicator start --resource-arn This triggers the replication job. The output will look similar to: -```json +```json lineNumbers { "job_id": "50005865-1589-4f6d-a720-c86f5a5dd021", "state": "TESTING_CONNECTION", @@ -125,7 +125,7 @@ The output will look similar to: To trigger replication via the HTTP API, send a `POST` request to `http://localhost.localstack.cloud:4566/_localstack/replicator/jobs` with the following payload: -```json +```json lineNumbers { "replication_type": "SINGLE_RESOURCE", "replication_job_config": { @@ -160,7 +160,7 @@ $ localstack replicator status This command returns the job status in JSON format, for example: -```json +```json lineNumbers { "job_id": "50005865-1589-4f6d-a720-c86f5a5dd021", "state": "SUCCEEDED", @@ -210,7 +210,7 @@ AWS_PROFILE=ls-sandbox aws ssm put-parameter\ AWS_PROFILE=ls-sandbox aws ssm get-parameters --names myparam ``` -```json +```json lineNumbers { "Parameters": [ { @@ -235,7 +235,7 @@ Next, we can check that the parameter is not present in LocalStack using `awsloc awslocal ssm get-parameters --name myparam ``` -```json +```json lineNumbers { "Parameters": [], "InvalidParameters": [ @@ -258,7 +258,7 @@ LOCALSTACK_AUTH_TOKEN= \ Configured credentials from the AWS CLI -```json +```json lineNumbers { "job_id": "9acdc850-f71b-4474-b138-1668eb8b8396", "state": "TESTING_CONNECTION", @@ -278,7 +278,7 @@ LOCALSTACK_AUTH_TOKEN= \ localstack replicator status 9acdc850-f71b-4474-b138-1668eb8b8396 ``` -```json +```json lineNumbers { "job_id": "9acdc850-f71b-4474-b138-1668eb8b8396", "state": "SUCCEEDED", @@ -297,7 +297,7 @@ The SSM parameter is now accessible. awslocal ssm get-parameters --name myparam --region eu-central-1 ``` -```json +```json lineNumbers { "Parameters": [ { diff --git a/src/content/docs/aws/tooling/extensions/developing-extensions.mdx b/src/content/docs/aws/tooling/extensions/developing-extensions.mdx index ae839622..e6fc71dd 100644 --- a/src/content/docs/aws/tooling/extensions/developing-extensions.mdx +++ b/src/content/docs/aws/tooling/extensions/developing-extensions.mdx @@ -18,7 +18,7 @@ LocalStack exposes a Python API for building extensions that can be found in the The basic interface to implement is as follows: -```python +```python lineNumbers class Extension(BaseExtension): """ An extension that is loaded into LocalStack dynamically. The method @@ -89,7 +89,7 @@ class Extension(BaseExtension): A minimal example would look like this: -```python +```python lineNumbers import logging from localstack.extensions.api import Extension @@ -123,7 +123,7 @@ example below. 
A minimal `setup.cfg` for the extension above could look like this: -```toml +```toml lineNumbers [metadata] name = localstack-extension-ready-announcer description = LocalStack extension that logs when LocalStack is ready to receive requests @@ -174,7 +174,7 @@ Commands: First, create a new extension from a template. To use `localstack extensions dev new`, you will also need to install [cookiecutter](https://github.com/cookiecutter/cookiecutter) via `pip install cookiecutter`. -```bash +```bash lineNumbers $ localstack extensions dev new project_name [My LocalStack Extension]: project_short_description [All the boilerplate you need to create a LocalStack extension.]: diff --git a/src/content/docs/aws/tooling/extensions/managing-extensions.mdx b/src/content/docs/aws/tooling/extensions/managing-extensions.mdx index e0315765..afd5accc 100644 --- a/src/content/docs/aws/tooling/extensions/managing-extensions.mdx +++ b/src/content/docs/aws/tooling/extensions/managing-extensions.mdx @@ -109,7 +109,7 @@ If you want to use the `file://` directive, the distribution file needs to be mo In a docker-compose file, this would look something like: -```yaml +```yaml lineNumbers services: localstack: container_name: "localstack-main" @@ -153,7 +153,7 @@ An example project could look something like this: * `docker-compose.yaml` - ```yaml + ```yaml lineNumbers services: localstack: ... diff --git a/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx b/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx index 8284cf34..63d652f5 100644 --- a/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx +++ b/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx @@ -227,7 +227,7 @@ First, create a watchman wrapper by using After that, you can use the following `Makefile` snippet, or implement another shell script to prepare the codebase for hot reloading: -```make +```make lineNumbers BUILD_FOLDER ?= build PROJECT_MODULE_NAME = my_project_module @@ -277,7 +277,7 @@ npm install -D @types/aws-lambda esbuild Create a new file named `index.ts`. Add the following code to the new file: -```ts +```ts lineNumbers import { Context, APIGatewayProxyResult, APIGatewayEvent } from 'aws-lambda'; export const handler = async (event: APIGatewayEvent, context: Context): Promise => { @@ -453,7 +453,7 @@ You can now see that the changes are applied without redeploying the Lambda func -```yaml +```yaml lineNumbers custom: localstack: ... @@ -477,7 +477,7 @@ custom: ``` -```kotlin +```kotlin lineNumbers package org.localstack.cdkstack import java.util.UUID @@ -521,7 +521,7 @@ class ApplicationStack(parent: Construct, name: String) : Stack(parent, name) { ``` -```hcl +```hcl lineNumbers variable "STAGE" { type = string default = "local" @@ -650,7 +650,7 @@ For bash, please use single quotes `'` instead of double quotes `"` to make sure In order to make use of the environment variable placeholders, you can inject them into the LocalStack container, for example using the following `docker-compose.yml` file. -```yaml +```yaml lineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx b/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx index dfd6d3b6..eff9c268 100644 --- a/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx +++ b/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx @@ -56,7 +56,7 @@ inside the Lambda function code. 
In general, all you need is the following code fragment placed inside your handler code: -```python +```python lineNumbers import debugpy debugpy.listen(("0.0.0.0", 19891)) debugpy.wait_for_client() # blocks execution until client is attached @@ -65,7 +65,7 @@ debugpy.wait_for_client() # blocks execution until client is attached For extra convenience, you can use the `wait_for_debug_client` function from our example. It implements the above-mentioned start of the debug server and also adds an automatic cancellation of the wait task if the debug client (i.e. VSCode) doesn't connect. -```python +```python lineNumbers def wait_for_debug_client(timeout=15): """Utility function to enable debugging with Visual Studio Code""" import time, threading @@ -89,7 +89,7 @@ def wait_for_debug_client(timeout=15): For attaching the debug server from Visual Studio Code, you need to add a run configuration. -```json +```json lineNumbers { "version": "0.2.0", "configurations": [ @@ -287,7 +287,7 @@ Make sure you installed the following extensions: Add a new task by creating/modifying the `.vscode/tasks.json` file: -```json +```json lineNumbers { "version": "2.0.0", "tasks": [ @@ -303,7 +303,7 @@ Add a new task by creating/modifying the `.vscode/tasks.json` file: Create a new `launch.json` file or edit an existing one from the `Run and Debug` tab, then add the following configuration: -```json +```json lineNumbers { "version": "0.2.0", "configurations": [ @@ -330,7 +330,7 @@ lambda function. Set the `LAMBDA_DOCKER_FLAGS` to enable the debugger using `NODE_OPTIONS`: -```yaml +```yaml lineNumbers #docker-compose.yml services: @@ -345,7 +345,7 @@ services: Add a new task by creating/modifying the `.vscode/tasks.json` file: -```json +```json lineNumbers { "version": "2.0.0", "tasks": [ @@ -361,7 +361,7 @@ Add a new task by creating/modifying the `.vscode/tasks.json` file: Create a new `launch.json` file or edit an existing one from the `Run and Debug` tab, then add the following configuration: -```json +```json lineNumbers { "version": "0.2.0", "configurations": [ @@ -381,7 +381,7 @@ then add the following configuration: A simple example of a Node.js lambda, `myindex.js` could look like this: -```js +```js lineNumbers exports.handler = async (event) => { console.log(event); const response = { @@ -469,7 +469,7 @@ localstack start --volume /path/to/debug-config.yaml:/tmp/lambda_debug_mode_conf ``` -```yaml +```yaml lineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -500,7 +500,7 @@ for each specific Lambda function ARN. #### Example: Basic Debugging Configuration This example configures Lambda Debug Mode to use port 19891 for the remote debugger. -```yaml +```yaml lineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one: debug-port: 19891 @@ -510,7 +510,7 @@ functions: In this example, the automatic timeout handling feature is disabled for the specified Lambda function, enforcing the predefined timeouts instead. -```yaml +```yaml lineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one: debug-port: 19891 @@ -522,7 +522,7 @@ functions: Specifying an unqualified Lambda ARN in the configuration is equivalent to specifying the ARN with the `$LATEST` version qualifier. 
-```yaml +```yaml lineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one:$LATEST: debug-port: 19891 @@ -534,7 +534,7 @@ To debug multiple Lambda functions simultaneously, assign a different debug port Note that this configuration affects the container's internal debugger port as well, so the debugger port must be set accordingly. -```yaml +```yaml lineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one: debug-port: 19891 @@ -546,7 +546,7 @@ functions: You can also debug different versions of the same Lambda function by assigning unique ports to each version. -```yaml +```yaml lineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one:1: debug-port: 19891 diff --git a/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md b/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md index 128a6c3d..2fa2b2c4 100644 --- a/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md +++ b/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md @@ -25,7 +25,7 @@ This SDK is still in a preview phase, and will be subject to fast and breaking c The best way to use the LocalStack SDK for Java in your project is to consume it from Maven Central. You can use Maven to import the entire SDK into your project. -```xml +```xml lineNumbers cloud.localstack localstack-sdk @@ -50,7 +50,7 @@ For instance, let us imagine the case in which you want to add a fault rule for You first need to use the `FaultRuleRequest` class to build a fault rule request. Then, you need to pass such a request object to the `addFaultRules` method of a created `ChaosClient`. -```java +```java lineNumbers import cloud.localstack.sdk.chaos.ChaosClient; import cloud.localstack.sdk.chaos.requests.FaultRuleRequest; @@ -63,7 +63,7 @@ As a second example, let us look at the necessary code to save and load a Cloud Similarly to the `ChaosClient`, the `PodsClient` exposes two functions, `savePod` and `loadPod`, which expect a `SavePodRequest` and a `LoadPodRequest`, respectively. The resulting code is the following: -```java +```java lineNumbers import cloud.localstack.sdk.pods.PodsClient; import cloud.localstack.sdk.pods.requests.LoadPodRequest; import cloud.localstack.sdk.pods.requests.SavePodRequest; diff --git a/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md b/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md index 87508d81..98c4fe2c 100644 --- a/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md +++ b/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md @@ -43,7 +43,7 @@ Using the SDK in Python is straightforward: developers can import the relevant m The following code snippet shows how to set up an SQS client, create a queue, send messages, and retrieve them to test local SQS interactions using LocalStack. -```python +```python lineNumbers import json import boto3 import localstack.sdk.aws @@ -88,7 +88,7 @@ for msg in messages: -```bash +```bash lineNumbers Message Body: {"event": "event-0", "message": "message-0"} Message Body: {"event": "event-1", "message": "message-1"} Message Body: {"event": "event-2", "message": "message-2"} @@ -100,7 +100,7 @@ Message Body: {"event": "event-4", "message": "message-4"} The following code snippet verifies an email address, sends a raw email, retrieves the message ID, and discards all SES messages afterward. 
-```python +```python lineNumbers import boto3 import localstack.sdk.aws @@ -148,7 +148,7 @@ Cloud Pods is a feature that enables storing and managing snapshots of the curre This code snippet shows listing available pods, saving a new pod, loading it, and then deleting it. You need to set your `LOCALSTACK_AUTH_TOKEN` in your terminal session before running the snippet. -```python +```python lineNumbers from localstack.sdk.pods import PodsClient POD_NAME = "ls-cloud-pod" @@ -184,7 +184,7 @@ Pod 'ls-cloud-pod' deleted. The following example demonstrates how to reset the current cloud state using LocalStack’s `StateClient`. -```python +```python lineNumbers import boto3 from localstack.sdk.state import StateClient @@ -228,7 +228,7 @@ Error after state reset: AWS.SimpleQueueService.NonExistentQueue LocalStack’s Chaos API enables fault injection to simulate issues in AWS services. This example shows how to add a fault rule for the S3 service, retrieve and display the rule, and finally delete it to return to normal operations. -```python +```python lineNumbers import localstack.sdk.chaos from localstack.sdk.models import FaultRule diff --git a/src/content/docs/aws/tooling/testing-utils.md b/src/content/docs/aws/tooling/testing-utils.md index b1e65fed..c41f3096 100644 --- a/src/content/docs/aws/tooling/testing-utils.md +++ b/src/content/docs/aws/tooling/testing-utils.md @@ -23,7 +23,7 @@ pip install localstack-utils ### Usage -```python +```python lineNumbers import time import boto3 import unittest diff --git a/src/content/docs/aws/tutorials/elb-load-balancing.mdx b/src/content/docs/aws/tutorials/elb-load-balancing.mdx index 64acf699..5b180910 100644 --- a/src/content/docs/aws/tutorials/elb-load-balancing.mdx +++ b/src/content/docs/aws/tutorials/elb-load-balancing.mdx @@ -93,7 +93,7 @@ This bucket is responsible for storing the deployment artifacts and ensuring tha We have a `serverless.yml` file in the directory to define our Serverless project's configuration, which includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. To set up the plugins we installed earlier, you need to add the following properties to your `serverless.yml` file: -```yaml +```yaml lineNumbers service: serverless-elb frameworkVersion: '3' @@ -123,7 +123,7 @@ Configure a `deploy` script in your `package.json` file to simplify the deployme It lets you run the `serverless deploy` command directly over your local infrastructure. Update your `package.json` file to include the following: -```json +```json lineNumbers { "name": "serverless-elb", "version": "1.0.0", @@ -156,7 +156,7 @@ This will execute the `serverless deploy --stage local` command, deploying your Now, let's create two Lambda functions named `hello1` and `hello2` that will run on the Node.js 12.x runtime. Open the `handler.js` file and replace the existing code with the following: -```js +```js lineNumbers 'use strict'; module.exports.hello1 = async (event) => { @@ -194,7 +194,7 @@ It is typically used when you need to include binary content in the response bod Let us now configure the `serverless.yml` file to create an Application Load Balancer (ALB) and attach the Lambda functions to it. 
-```yaml +```yaml lineNumbers service: serverless-elb provider: @@ -239,7 +239,7 @@ In this example, both functions are triggered by HTTP GET requests to the `/hell Lastly, let's create a VPC, a subnet, an Application Load Balancer, and an HTTP listener on the load balancer that redirects traffic to the target group. To do this, add the following resources to your `serverless.yml` file: -```yaml +```yaml lineNumbers ... resources: Resources: @@ -321,7 +321,7 @@ This output confirms the successful deployment of your Serverless service to the It also displays information about the deployed Lambda functions (`hello1` and `hello2`). You can run the following command to verify that the functions and the load balancers have been deployed: -```bash +```bash lineNumbers awslocal lambda list-functions { "Functions": [ diff --git a/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx index 16446e69..3d06f47e 100644 --- a/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx +++ b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx @@ -47,7 +47,7 @@ Optionally, you can create a folder called `assets` to store images and other as Let's create a directory named `s3-static-website-localstack` where we'll store our static website files. If you don't have an `index.html` file, you can use the following code to create one: -```html +```html lineNumbers @@ -65,7 +65,7 @@ S3 will serve this file when a user visits the root URL of your static website, In a similar fashion, you can configure a custom error document that contains a user-friendly error message. Let's create a file named `error.html` and add the following code: -```html +```html lineNumbers @@ -96,7 +96,7 @@ awslocal s3api create-bucket --bucket testwebsite With the bucket created, we can now attach a policy to it to allow public access and its contents. Let's create a file named `bucket_policy.json` in the root directory and add the following code: -```json +```json lineNumbers { "Version": "2012-10-17", "Statement": [ @@ -147,7 +147,7 @@ Before that, we would need to manually configure the local service endpoints and We will use the [AWS Provider for Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to interact with the many resources supported by AWS in LocalStack. Create a new file named `provider.tf` and specify mock credentials for the AWS provider: -```hcl +```hcl lineNumbers provider "aws" { region = "us-east-1" access_key = "fake" @@ -161,7 +161,7 @@ Additionally, we have to point the individual services to LocalStack. We can do this by specifying the `endpoints` parameter for each service, that we intend to use. Our `provider.tf` file should look like this: -```hcl +```hcl lineNumbers provider "aws" { access_key = "test" secret_key = "test" @@ -190,7 +190,7 @@ For most of the other services, it is fine to use `localhost:4566`. With the provider configured, we can now configure the variables for our S3 bucket. Create a new file named `variables.tf` and add the following code: -```hcl +```hcl lineNumbers variable "bucket_name" { description = "Name of the s3 bucket. Must be unique." type = string @@ -207,7 +207,7 @@ We take a user input for the bucket name and tags. Next, we will define the output variables for our Terraform configuration. 
Create a new file named `outputs.tf` and add the following code: -```hcl +```hcl lineNumbers output "arn" { description = "ARN of the bucket" value = aws_s3_bucket.s3_bucket.arn @@ -232,7 +232,7 @@ The output variables are the ARN, name, domain name, and website endpoint of the With all the configuration files in place, we can now create the S3 bucket. Create a new file named `main.tf` and create the S3 bucket using the following code: -```hcl +```hcl lineNumbers resource "aws_s3_bucket" "s3_bucket" { bucket = var.bucket_name tags = var.tags @@ -242,7 +242,7 @@ resource "aws_s3_bucket" "s3_bucket" { To configure the static website hosting, we will use the `aws_s3_bucket_website_configuration` resource. Add the following code to the `main.tf` file: -```hcl +```hcl lineNumbers resource "aws_s3_bucket_website_configuration" "s3_bucket" { bucket = aws_s3_bucket.s3_bucket.id @@ -260,7 +260,7 @@ resource "aws_s3_bucket_website_configuration" "s3_bucket" { To set the bucket policy, we will use the `aws_s3_bucket_policy` resource. Add the following code to the `main.tf` file: -```hcl +```hcl lineNumbers resource "aws_s3_bucket_acl" "s3_bucket" { bucket = aws_s3_bucket.s3_bucket.id acl = "public-read" @@ -292,7 +292,7 @@ Pick up an appropriate policy based on your use case. Let's use the `aws_s3_object` resource to upload the files to the bucket. Add the following code to the `main.tf` file: -```hcl +```hcl lineNumbers resource "aws_s3_object" "object_www" { depends_on = [aws_s3_bucket.s3_bucket] for_each = fileset("${path.root}", "*.html") @@ -309,7 +309,7 @@ The above code uploads all our html files to the bucket. We are also setting the ACL of the files to `public-read`. Optionally, if you have static assets like images, CSS, and JavaScript files, you can upload them to the bucket using the same `aws_s3_bucket_object` resource by adding the following code to the `main.tf` file: -```hcl +```hcl lineNumbers resource "aws_s3_object" "object_assets" { depends_on = [aws_s3_bucket.s3_bucket] for_each = fileset(path.module, "assets/*") From 4f468334f6c717adf0a1e96c6e35b645be7c4c1f Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Tue, 24 Jun 2025 14:27:14 -0700 Subject: [PATCH 5/8] starting snow docs --- src/content/docs/snowflake/capabilities/init-hooks.mdx | 2 +- src/content/docs/snowflake/capabilities/state-management.mdx | 4 ++-- src/content/docs/snowflake/features/accounts.md | 4 ++-- src/content/docs/snowflake/features/authentication.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/content/docs/snowflake/capabilities/init-hooks.mdx b/src/content/docs/snowflake/capabilities/init-hooks.mdx index 9292b8b0..1e0c5360 100644 --- a/src/content/docs/snowflake/capabilities/init-hooks.mdx +++ b/src/content/docs/snowflake/capabilities/init-hooks.mdx @@ -30,7 +30,7 @@ Mount the script into `/etc/localstack/init/ready.d/` using Docker Compose or th -```yaml +```yaml lineNumbers version: "3.8" services: diff --git a/src/content/docs/snowflake/capabilities/state-management.mdx b/src/content/docs/snowflake/capabilities/state-management.mdx index 6b222b18..e7fbff1c 100644 --- a/src/content/docs/snowflake/capabilities/state-management.mdx +++ b/src/content/docs/snowflake/capabilities/state-management.mdx @@ -32,7 +32,7 @@ localstack start ``` -```yaml +```yaml lineNumbers ... 
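    # the `...` above elides the top of the compose file (the `services:`
    # mapping and the service name), which the full example spells out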
image: localstack/snowflake environment: @@ -43,7 +43,7 @@ localstack start ``` -```bash +```bash lineNumbers docker run \ -e LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} \ -e PERSISTENCE=1 \ diff --git a/src/content/docs/snowflake/features/accounts.md b/src/content/docs/snowflake/features/accounts.md index cb780846..f721c504 100644 --- a/src/content/docs/snowflake/features/accounts.md +++ b/src/content/docs/snowflake/features/accounts.md @@ -25,7 +25,7 @@ If the Snowflake driver provides a connection object, you can pass the `account` Example using the Snowflake Connector for Python: -```python +```python lineNumbers sf_conn_obj = sf.connect( account="your_account", # other parameters @@ -34,7 +34,7 @@ sf_conn_obj = sf.connect( Example using the NodeJS Driver for Snowflake: -```javascript +```javascript lineNumbers var connection = snowflake.createConnection({ account: "your_account", // other parameters diff --git a/src/content/docs/snowflake/features/authentication.md b/src/content/docs/snowflake/features/authentication.md index 89c32ab6..3cb9c7a8 100644 --- a/src/content/docs/snowflake/features/authentication.md +++ b/src/content/docs/snowflake/features/authentication.md @@ -18,7 +18,7 @@ To authenticate using a username and password, you can set the `user` and `passw Here's an example of how to connect to the Snowflake emulator using a username and password in a Python script: -```python +```python lineNumbers import snowflake.connector as sf sf_conn_obj = sf.connect( @@ -55,7 +55,7 @@ ALTER USER your_user_name SET RSA_PUBLIC_KEY=''; Then authenticate with the private key using the Snowflake client: -```python +```python lineNumbers import snowflake.connector conn = snowflake.connector.connect( From 76503e36b4ae3cd9530b27402ec5e3f4c3608cf8 Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Tue, 24 Jun 2025 14:30:53 -0700 Subject: [PATCH 6/8] fixed my dumbass mistake, wrote lineNumbers instead of showLineNumbers --- .../chaos-engineering/chaos-api.md | 2 +- .../docs/aws/capabilities/config/podman.md | 2 +- .../networking/accessing-endpoint-url.mdx | 4 +-- .../networking/external-port-range.mdx | 2 +- .../custom-tls-certificates.mdx | 4 +-- .../security-testing/explainable-iam.md | 4 +-- .../security-testing/iam-policy-stream.md | 2 +- .../state-management/cloud-pods.mdx | 6 ++-- .../state-management/persistence.mdx | 2 +- .../docs/aws/enterprise/k8s-operator.md | 2 +- src/content/docs/aws/getting-started/faq.mdx | 4 +-- .../docs/aws/getting-started/installation.mdx | 4 +-- .../docs/aws/getting-started/quickstart.mdx | 12 +++---- .../integrations/app-frameworks/quarkus.md | 4 +-- .../app-frameworks/serverless-framework.md | 6 ++-- .../app-frameworks/spring-cloud-function.mdx | 28 ++++++++-------- .../integrations/aws-native-tools/aws-sam.md | 2 +- .../docs/aws/integrations/aws-sdks/cpp.md | 2 +- .../docs/aws/integrations/aws-sdks/go.mdx | 4 +-- .../docs/aws/integrations/aws-sdks/java.mdx | 16 +++++----- .../aws/integrations/aws-sdks/javascript.mdx | 4 +-- .../docs/aws/integrations/aws-sdks/net.md | 4 +-- .../docs/aws/integrations/aws-sdks/php.md | 2 +- .../aws/integrations/aws-sdks/python-boto3.md | 2 +- .../docs/aws/integrations/aws-sdks/ruby.md | 4 +-- .../integrations/containers/devcontainers.mdx | 12 +++---- .../aws/integrations/containers/gitpod.md | 2 +- .../integrations/containers/kubernetes.mdx | 4 +-- .../containers/rancher-desktop.mdx | 4 +-- .../continuous-integration/bitbucket.md | 4 +-- .../continuous-integration/circleci.md | 24 +++++++------- 
.../continuous-integration/codebuild.md | 28 ++++++++-------- .../continuous-integration/github-actions.md | 14 ++++---- .../continuous-integration/gitlab-ci.md | 16 +++++----- .../continuous-integration/travis-ci.md | 4 +-- .../infrastructure-as-code/cloud-custodian.md | 2 +- .../infrastructure-as-code/crossplane.md | 2 +- .../infrastructure-as-code/pulumi.mdx | 2 +- .../infrastructure-as-code/terraform.mdx | 32 +++++++++---------- .../messaging/selfmanaged-kafka-cluster.md | 10 +++--- .../testing/lambdatest-hyperexecute.md | 6 ++-- .../integrations/testing/testcontainers.mdx | 12 +++---- src/content/docs/aws/services/account.mdx | 6 ++-- src/content/docs/aws/services/acm.mdx | 2 +- src/content/docs/aws/services/apacheflink.mdx | 8 ++--- src/content/docs/aws/services/apigateway.mdx | 16 +++++----- .../docs/aws/services/appautoscaling.mdx | 2 +- src/content/docs/aws/services/appconfig.mdx | 2 +- src/content/docs/aws/services/appsync.mdx | 2 +- src/content/docs/aws/services/athena.mdx | 2 +- src/content/docs/aws/services/backup.mdx | 4 +-- .../docs/aws/services/cloudformation.mdx | 4 +-- src/content/docs/aws/services/codebuild.mdx | 14 ++++---- .../docs/aws/services/codepipeline.mdx | 6 ++-- src/content/docs/aws/services/cognito.mdx | 6 ++-- src/content/docs/aws/services/docdb.mdx | 4 +-- .../docs/aws/services/dynamodbstreams.mdx | 4 +-- src/content/docs/aws/services/ecs.mdx | 6 ++-- src/content/docs/aws/services/eks.mdx | 6 ++-- .../aws/services/elementalmediaconvert.mdx | 2 +- src/content/docs/aws/services/es.mdx | 2 +- src/content/docs/aws/services/events.mdx | 2 +- src/content/docs/aws/services/fis.mdx | 2 +- src/content/docs/aws/services/glue.mdx | 2 +- src/content/docs/aws/services/iot.mdx | 4 +-- .../docs/aws/services/iotanalytics.mdx | 2 +- src/content/docs/aws/services/iotwireless.mdx | 2 +- src/content/docs/aws/services/kinesis.mdx | 2 +- .../docs/aws/services/lakeformation.mdx | 2 +- src/content/docs/aws/services/lambda.mdx | 2 +- .../docs/aws/services/managedblockchain.mdx | 4 +-- src/content/docs/aws/services/neptune.mdx | 4 +-- src/content/docs/aws/services/opensearch.mdx | 4 +-- src/content/docs/aws/services/rds.mdx | 4 +-- src/content/docs/aws/services/s3.mdx | 6 ++-- .../docs/aws/services/servicediscovery.mdx | 4 +-- src/content/docs/aws/services/sqs.mdx | 6 ++-- .../docs/aws/services/stepfunctions.mdx | 18 +++++------ src/content/docs/aws/services/swf.mdx | 8 ++--- src/content/docs/aws/services/transfer.mdx | 2 +- .../docs/aws/services/verifiedpermissions.mdx | 2 +- src/content/docs/aws/services/waf.mdx | 2 +- src/content/docs/aws/services/xray.mdx | 4 +-- .../docs/aws/tooling/aws-replicator.mdx | 18 +++++------ .../extensions/developing-extensions.mdx | 8 ++--- .../extensions/managing-extensions.mdx | 4 +-- .../tooling/lambda-tools/hot-reloading.mdx | 12 +++---- .../tooling/lambda-tools/remote-debugging.mdx | 30 ++++++++--------- .../aws/tooling/localstack-sdks/java-sdk.md | 6 ++-- .../aws/tooling/localstack-sdks/python-sdk.md | 12 +++---- src/content/docs/aws/tooling/testing-utils.md | 2 +- .../docs/aws/tutorials/elb-load-balancing.mdx | 12 +++---- .../tutorials/s3-static-website-terraform.mdx | 24 +++++++------- 93 files changed, 310 insertions(+), 310 deletions(-) diff --git a/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md b/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md index 7435897a..c0e8cb52 100644 --- a/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md +++ 
b/src/content/docs/aws/capabilities/chaos-engineering/chaos-api.md @@ -49,7 +49,7 @@ When active, rules are evaluated sequentially on every request to LocalStack unt The schema for the configuration is as follows. -```json showLineNumbers +```json showshowLineNumbers [ { "region": "(str) Region name, e.g. 'ap-south-1'. If omitted, all regions are affected.", diff --git a/src/content/docs/aws/capabilities/config/podman.md b/src/content/docs/aws/capabilities/config/podman.md index 4a1bd611..2b0d8497 100644 --- a/src/content/docs/aws/capabilities/config/podman.md +++ b/src/content/docs/aws/capabilities/config/podman.md @@ -101,7 +101,7 @@ podman machine set --rootful For the Docker Compose setup, use the following configuration. When running in rootless mode, make sure to comment out the HTTPS gateway port, as it is unable to bind to privileged ports below 1024. -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx b/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx index f2c35196..7da8fc3b 100644 --- a/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx +++ b/src/content/docs/aws/capabilities/networking/accessing-endpoint-url.mdx @@ -130,7 +130,7 @@ docker run --rm -it --dns 172.27.0.2 --network ls ``` -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -200,7 +200,7 @@ docker run --rm -it --network my-network ``` -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: # other configuration here diff --git a/src/content/docs/aws/capabilities/networking/external-port-range.mdx b/src/content/docs/aws/capabilities/networking/external-port-range.mdx index 43be5dd8..7f689030 100644 --- a/src/content/docs/aws/capabilities/networking/external-port-range.mdx +++ b/src/content/docs/aws/capabilities/networking/external-port-range.mdx @@ -52,7 +52,7 @@ GATEWAY_LISTEN=0.0.0.0:4766 EXTERNAL_SERVICE_PORTS_START=4710 EXTERNAL_SERVICE_P ``` -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack-main-1: container_name: localstack-main-1 diff --git a/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx b/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx index 02abe182..cf67b60b 100644 --- a/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx +++ b/src/content/docs/aws/capabilities/security-testing/custom-tls-certificates.mdx @@ -34,7 +34,7 @@ If you run LocalStack in a Docker container (which includes using [the CLI](/aws Create a `Dockerfile` containing the following commands: -```yaml showLineNumbers +```yaml showshowLineNumbers FROM localstack/localstack:latest # or if using the pro image: FROM localstack/localstack-pro:latest @@ -74,7 +74,7 @@ docker run ``` -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: image: diff --git a/src/content/docs/aws/capabilities/security-testing/explainable-iam.md b/src/content/docs/aws/capabilities/security-testing/explainable-iam.md index d8c0f950..0087e3f7 100644 --- a/src/content/docs/aws/capabilities/security-testing/explainable-iam.md +++ b/src/content/docs/aws/capabilities/security-testing/explainable-iam.md @@ -28,7 +28,7 @@ However, we have not included the `iam:PassRole` permission, and we will use the Create a policy document
named `policy_1.json` and add the following content: -```json showLineNumbers +```json showshowLineNumbers { "Version": "2012-10-17", "Statement": [ @@ -113,7 +113,7 @@ You can incorporate this action into the policy. For illustrative purposes, we will keep the example straightforward, using the same wildcard resource. Edit the `policy_1.json` file to include the `iam:PassRole` action: -```json showLineNumbers +```json showshowLineNumbers { "Version": "2012-10-17", "Statement": [ diff --git a/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md b/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md index b245c1f9..3567a029 100644 --- a/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md +++ b/src/content/docs/aws/capabilities/security-testing/iam-policy-stream.md @@ -57,7 +57,7 @@ awslocal sns create-topic --name test-topic In the other tab, the required policy will be generated. This policy can then be attached to an IAM role, enabling it to create the resource. -```bash showLineNumbers +```bash showshowLineNumbers Attached to identity: "arn:aws:iam::000000000000:root" Policy: diff --git a/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx b/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx index 2b64865a..996577fb 100644 --- a/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx +++ b/src/content/docs/aws/capabilities/state-management/cloud-pods.mdx @@ -298,7 +298,7 @@ AUTO_LOAD_POD=foo-pod localstack start ``` -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "localstack-main" @@ -352,7 +352,7 @@ LocalStack, upon mounting `init-pods.d` to the appropriate location, will sequen The Docker Compose file for correctly mounting `init-pods.d` will look like: -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "localstack-main" @@ -498,7 +498,7 @@ With such a configuration, the `foo-pod` Cloud Pod will be loaded from the `bar- To properly configure the remote, you need to provide the needed environment variables when starting the LocalStack container. For instance, an S3 remote needs an `AWS_ACCESS_KEY` and an `AWS_SECRET_ACCESS_KEY`, as follows: -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "localstack-main" diff --git a/src/content/docs/aws/capabilities/state-management/persistence.mdx b/src/content/docs/aws/capabilities/state-management/persistence.mdx index 2f4907c7..8fd4a1a4 100644 --- a/src/content/docs/aws/capabilities/state-management/persistence.mdx +++ b/src/content/docs/aws/capabilities/state-management/persistence.mdx @@ -28,7 +28,7 @@ PERSISTENCE=1 localstack start ``` -```yaml showLineNumbers +```yaml showshowLineNumbers image: localstack/localstack-pro environment: - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} diff --git a/src/content/docs/aws/enterprise/k8s-operator.md b/src/content/docs/aws/enterprise/k8s-operator.md index a24dcd7d..5abc2696 100644 --- a/src/content/docs/aws/enterprise/k8s-operator.md +++ b/src/content/docs/aws/enterprise/k8s-operator.md @@ -22,7 +22,7 @@ kubectl apply -f https://raw.githubusercontent.com/localstack/localstack-k8s-ope You can then deploy a LocalStack instance by storing the following file content as `localstack.yml` and applying it against the cluster via `kubectl apply -f localstack.yml`.
-```bash showLineNumbers +```bash showshowLineNumbers apiVersion: api.localstack.cloud/v1alpha1 kind: LocalStack metadata: diff --git a/src/content/docs/aws/getting-started/faq.mdx b/src/content/docs/aws/getting-started/faq.mdx index 64c59c7a..cbad6a08 100644 --- a/src/content/docs/aws/getting-started/faq.mdx +++ b/src/content/docs/aws/getting-started/faq.mdx @@ -235,7 +235,7 @@ To do so, you need to change the [`docker-compose.yml`](https://github.com/local -```yaml showLineNumbers +```yaml showshowLineNumbers volumes: - "/var/run/docker.sock:/var/run/docker.sock" - "\\\\wsl$\\\\home\\\\volume:/var/lib/localstack" # mount volume in WSL2 Linux file system @@ -246,7 +246,7 @@ To do so, you need to change the [`docker-compose.yml`](https://github.com/local As an alternative, you can set the volume as `- "~/volume:/var/lib/localstack"` then start Docker using command `wsl docker compose -f docker-compose.yml up`. -```yaml showLineNumbers +```yaml showshowLineNumbers volumes: - "/var/run/docker.sock:/var/run/docker.sock" - "localstack_data:/var/lib/localstack" # mount Docker volume diff --git a/src/content/docs/aws/getting-started/installation.mdx b/src/content/docs/aws/getting-started/installation.mdx index 8e74b350..4c5c87c0 100644 --- a/src/content/docs/aws/getting-started/installation.mdx +++ b/src/content/docs/aws/getting-started/installation.mdx @@ -246,7 +246,7 @@ Docker Compose v1.9.0 and above is supported. -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -264,7 +264,7 @@ services: -```yaml showLineNumbers +```yaml showshowLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/getting-started/quickstart.mdx b/src/content/docs/aws/getting-started/quickstart.mdx index 92ab4b69..dcf4c4cf 100644 --- a/src/content/docs/aws/getting-started/quickstart.mdx +++ b/src/content/docs/aws/getting-started/quickstart.mdx @@ -169,7 +169,7 @@ awslocal sns subscribe \ #### Create the Presign Lambda -```bash showLineNumbers +```bash showshowLineNumbers (cd lambdas/presign; rm -f lambda.zip; zip lambda.zip handler.py) awslocal lambda create-function \ --function-name presign \ @@ -187,7 +187,7 @@ $ awslocal lambda create-function-url-config \ #### Create the Image List Lambda -```bash showLineNumbers +```bash showshowLineNumbers (cd lambdas/list; rm -f lambda.zip; zip lambda.zip handler.py) awslocal lambda create-function \ --function-name list \ @@ -207,7 +207,7 @@ $ awslocal lambda create-function-url-config \ - ```bash showLineNumbers + ```bash showshowLineNumbers cd lambdas/resize rm -rf libs lambda.zip docker run --platform linux/x86_64 -v "$PWD":/var/task "public.ecr.aws/sam/build-python3.11" /bin/sh -c "pip install -r requirements.txt -t libs; exit" @@ -218,7 +218,7 @@ $ awslocal lambda create-function-url-config \ ``` - ```bash showLineNumbers + ```bash showshowLineNumbers cd lambdas/resize rm -rf package lambda.zip mkdir package @@ -230,7 +230,7 @@ $ awslocal lambda create-function-url-config \ ``` - ```bash showLineNumbers + ```bash showshowLineNumbers cd lambdas/resize rm -rf package lambda.zip mkdir package @@ -245,7 +245,7 @@ $ awslocal lambda create-function-url-config \ #### Create the Image Resizer Lambda -```bash showLineNumbers +```bash showshowLineNumbers awslocal lambda create-function \ --function-name resize \ --runtime python3.11 \ diff --git a/src/content/docs/aws/integrations/app-frameworks/quarkus.md 
b/src/content/docs/aws/integrations/app-frameworks/quarkus.md index 08413f6d..c046b0cd 100644 --- a/src/content/docs/aws/integrations/app-frameworks/quarkus.md +++ b/src/content/docs/aws/integrations/app-frameworks/quarkus.md @@ -30,7 +30,7 @@ The Lambda extension is based on [AWS Java SDK 2.x](https://docs.aws.amazon.com/ Create a new project with the following command: -```bash showLineNumbers +```bash showshowLineNumbers mvn io.quarkus.platform:quarkus-maven-plugin:3.6.3:create \ -DprojectGroupId=org.acme \ -DprojectArtifactId=amazon-lambda-quickstart \ @@ -72,7 +72,7 @@ Add the following dependencies to the `pom.xml` file: To configure LocalStack, add the following properties to the `application.properties` file: -```bash showLineNumbers +```bash showshowLineNumbers quarkus.lambda.endpoint-override=http://localhost:4566 quarkus.lambda.aws.region=us-east-1 diff --git a/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md b/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md index 6237ff8b..b7e5a14d 100644 --- a/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md +++ b/src/content/docs/aws/integrations/app-frameworks/serverless-framework.md @@ -26,7 +26,7 @@ This guide assumes that you have the following tools installed. It also assumes that you already have a Serverless app set up consisting of a couple of Lambda functions and a `serverless.yml` file similar to the following. An example Serverless app integrated with LocalStack can be found here: Simple REST API using the Serverless Framework and LocalStack -```yaml showLineNumbers +```yaml showshowLineNumbers service: my-service frameworkVersion: ">=1.1.0 <=2.50.0" @@ -115,7 +115,7 @@ Hence, you need to configure the Lambda functions to use the `AWS_ENDPOINT_URL` In Python, this may look something like the following. The code detects if it is running in LocalStack by checking if the `AWS_ENDPOINT_URL` variable exists and then configures the endpoint URL accordingly. -```python showLineNumbers +```python showshowLineNumbers ... if 'AWS_ENDPOINT_URL' in os.environ: dynamodb = boto3.resource('dynamodb', endpoint_url=os.environ['AWS_ENDPOINT_URL']) @@ -182,7 +182,7 @@ Use the displayed endpoint `http://localhost:4566/restapis/XXXXXXXXXX/local/_use serverless-localstack supports a feature for lambda functions that allows local code mounting: -```yaml showLineNumbers +```yaml showshowLineNumbers # serverless.yml custom: diff --git a/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx b/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx index 91bbbe73..4e1efa51 100644 --- a/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx +++ b/src/content/docs/aws/integrations/app-frameworks/spring-cloud-function.mdx @@ -118,7 +118,7 @@ To that end, we use the "Shadow Jar" plugin. Here's our final `build.gradle`: -```groovy showLineNumbers=true title="build.gradle" +```groovy showshowLineNumbers=true title="build.gradle" plugins { id "java" id "org.jetbrains.kotlin.jvm" version '1.5.31' @@ -210,7 +210,7 @@ In this project, we are following [official documentation](https://docs.aws.amazon.com/lambda/latest/dg/java-logging.html#java-wt-logging-using-log4j2.8) to set up the `src/main/resources/log4j2.xml` content.
-```xml title="log4j2.xml" showLineNumbers +```xml title="log4j2.xml" showshowLineNumbers <?xml version="1.0" encoding="UTF-8"?> @@ -255,7 +255,7 @@ org.springframework.cloud.function.adapter.aws.FunctionInvoker::handleRequest Now our application needs an entry class, the one we referenced earlier. Let's add it under `src/main/kotlin/org/localstack/sampleproject/Application.kt`. -```kotlin showLineNumbers +```kotlin showshowLineNumbers package org.localstack.sampleproject import org.springframework.boot.autoconfigure.SpringBootApplication @@ -276,7 +276,7 @@ Let's configure it by creating a new configuration class `JacksonConfiguration.k `src/main/kotlin/org/localstack/sampleproject/config`: -```kotlin title="JacksonConfiguration.kt" showLineNumbers +```kotlin title="JacksonConfiguration.kt" showshowLineNumbers package org.localstack.sampleproject.config import com.fasterxml.jackson.annotation.JsonInclude @@ -319,7 +319,7 @@ implementations. Let's create a small logging utility to simplify interactions with the logger -```kotlin title="Logger.kt" showLineNumbers +```kotlin title="Logger.kt" showshowLineNumbers package org.localstack.sampleproject.util import org.apache.logging.log4j.LogManager @@ -343,7 +343,7 @@ Your application may even support multiple protocols with different request/resp Let's define utility functions to build API Gateway responses: -```kotlin showLineNumbers +```kotlin showshowLineNumbers package org.localstack.sampleproject.util import org.springframework.messaging.Message @@ -369,7 +369,7 @@ fun buildJsonErrorResponse(message: String, code: Int = 500) = And now a utility function to process API Gateway requests: -```kotlin showLineNumbers +```kotlin showshowLineNumbers package org.localstack.sampleproject.util import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyRequestEvent @@ -401,7 +401,7 @@ To transfer data from requests into something more meaningful than JSON strings (and back) you will be using a lot of Models and Data Transfer Objects (DTOs). It's time to define our first one. -```kotlin showLineNumbers title="SampleModel.kt" +```kotlin showshowLineNumbers title="SampleModel.kt" package org.localstack.sampleproject.model import com.fasterxml.jackson.annotation.JsonIgnore @@ -420,7 +420,7 @@ data class SampleModel( Let's add our first endpoints to simulate CRUD operations on previously defined `SampleModel`: -```kotlin showLineNumbers title="SampleApi.kt" +```kotlin showshowLineNumbers title="SampleApi.kt" package org.localstack.sampleproject.api import com.fasterxml.jackson.databind.ObjectMapper @@ -473,7 +473,7 @@ We know Java's cold start is always a pain. To minimize this pain, we will try to define a pre-warming endpoint within the Rest API. By invoking this function every 5-10 minutes, we can make sure the Rest API Lambda is always kept in a pre-warmed state. -```kotlin showLineNumbers title="ScheduleApi.kt" +```kotlin showshowLineNumbers title="ScheduleApi.kt" package org.localstack.sampleproject.api import com.fasterxml.jackson.databind.ObjectMapper @@ -515,7 +515,7 @@ We can still define pure lambda functions, DynamoDB stream handlers, and so on. Below you can find a small example of a few Lambda functions grouped in the `LambdaApi` class. -```kotlin showLineNumbers title="LambdaApi.kt" +```kotlin showshowLineNumbers title="LambdaApi.kt" package org.localstack.sampleproject.api import com.amazonaws.services.lambda.runtime.events.DynamodbEvent @@ -575,7 +575,7 @@ for usage examples.
-```yaml showLineNumbers +```yaml showshowLineNumbers service: localstack-sampleproject-serverless provider: @@ -628,7 +628,7 @@ functions: ``` -```java title="ApplicationStack.kt" showLineNumbers +```java title="ApplicationStack.kt" showshowLineNumbers package org.localstack.cdkstack import java.util.UUID @@ -703,7 +703,7 @@ class ApplicationStack(parent: Construct, name: String) : Stack(parent, name) { ``` -```hcl title="variables.tf" showLineNumbers +```hcl title="variables.tf" showshowLineNumbers variable "STAGE" { type = string default = "local" diff --git a/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md b/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md index 5b6be787..acf77ba0 100644 --- a/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md +++ b/src/content/docs/aws/integrations/aws-native-tools/aws-sam.md @@ -69,7 +69,7 @@ The `samlocal` wrapper will package and deploy the application to LocalStack. To debug your Lambda functions in VS Code while using the SAM CLI's `sam local` command alongside other services provided by LocalStack, set up a launch configuration in the `.vscode/launch.json` file. Insert the following settings into the file: -```json showLineNumbers +```json showshowLineNumbers { "type": "aws-sam", "request": "direct-invoke", diff --git a/src/content/docs/aws/integrations/aws-sdks/cpp.md b/src/content/docs/aws/integrations/aws-sdks/cpp.md index 5f99243f..ccb2fb68 100644 --- a/src/content/docs/aws/integrations/aws-sdks/cpp.md +++ b/src/content/docs/aws/integrations/aws-sdks/cpp.md @@ -15,7 +15,7 @@ which is the preferred way of integrating the C++ SDK with LocalStack. Consider the following example, which creates an SQS queue, sends a message to it, then receives the same message via the SDK: -```cpp showLineNumbers +```cpp showshowLineNumbers #include #include #include diff --git a/src/content/docs/aws/integrations/aws-sdks/go.mdx b/src/content/docs/aws/integrations/aws-sdks/go.mdx index 850804ac..c98ff08b 100644 --- a/src/content/docs/aws/integrations/aws-sdks/go.mdx +++ b/src/content/docs/aws/integrations/aws-sdks/go.mdx @@ -24,7 +24,7 @@ Full examples for both SDK versions can be found [in our samples repository](htt - ```go showLineNumbers + ```go showshowLineNumbers package main import ( @@ -52,7 +52,7 @@ func main() { -```go showLineNumbers +```go showshowLineNumbers package main import ( diff --git a/src/content/docs/aws/integrations/aws-sdks/java.mdx b/src/content/docs/aws/integrations/aws-sdks/java.mdx index 1e5758b2..45a76a32 100644 --- a/src/content/docs/aws/integrations/aws-sdks/java.mdx +++ b/src/content/docs/aws/integrations/aws-sdks/java.mdx @@ -41,7 +41,7 @@ The client can be used to upload a file to an existing bucket and then retrieve -```java showLineNumbers +```java showshowLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -57,7 +57,7 @@ AmazonS3 s3Client = AmazonS3ClientBuilder.standard() -```java showLineNumbers +```java showshowLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. 
final String ACCESS_KEY = "test"; @@ -81,7 +81,7 @@ S3Client s3Client = S3Client.builder() -```java showLineNumbers +```java showshowLineNumbers // Existing bucket name. final String BUCKET_NAME = "records"; @@ -110,7 +110,7 @@ BufferedReader reader = new BufferedReader(new InputStreamReader(objectInputStre -```java showLineNumbers +```java showshowLineNumbers // Existing bucket name. final String BUCKET_NAME = "records"; @@ -154,7 +154,7 @@ The full list of supported converters can be found [here](https://sdk.amazonaws. -```java showLineNumbers +```java showshowLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -172,7 +172,7 @@ private static AmazonDynamoDB dynamoDBClient = AmazonDynamoDBClientBuilder.stand ``` -```java showLineNumbers +```java showshowLineNumbers // Credentials that can be replaced with real AWS values. (To be handled properly and not hardcoded.) // These can be skipped altogether for LocalStack, but we generally want to avoid discrepancies with production code. final String ACCESS_KEY = "test"; @@ -206,7 +206,7 @@ DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() -```java showLineNumbers +```java showshowLineNumbers // Existing table name String TABLE_NAME = "person"; @@ -246,7 +246,7 @@ person.setBirthdateFromString(item.getString("birthdate")); -```java showLineNumbers +```java showshowLineNumbers // Existing table name. String TABLE_NAME = "person"; diff --git a/src/content/docs/aws/integrations/aws-sdks/javascript.mdx b/src/content/docs/aws/integrations/aws-sdks/javascript.mdx index 4a819638..c425ce96 100644 --- a/src/content/docs/aws/integrations/aws-sdks/javascript.mdx +++ b/src/content/docs/aws/integrations/aws-sdks/javascript.mdx @@ -23,7 +23,7 @@ Here is an example of how to create a Lambda client and an S3 client with the en -```javascript showLineNumbers +```javascript showshowLineNumbers const AWS = require('aws-sdk'); // Configure the AWS SDK to use the LocalStack endpoint and credentials @@ -70,7 +70,7 @@ s3.listBuckets((err, data) => { ``` -```javascript showLineNumbers +```javascript showshowLineNumbers const { LambdaClient, ListFunctionsCommand } = require('@aws-sdk/client-lambda'); const { S3Client, ListBucketsCommand } = require('@aws-sdk/client-s3'); diff --git a/src/content/docs/aws/integrations/aws-sdks/net.md b/src/content/docs/aws/integrations/aws-sdks/net.md index 238c0770..c9bf185e 100644 --- a/src/content/docs/aws/integrations/aws-sdks/net.md +++ b/src/content/docs/aws/integrations/aws-sdks/net.md @@ -86,7 +86,7 @@ The library aims to reduce the boilerplate required to set up LocalStack clients #### Dependency Injection Approach -```csharp showLineNumbers +```csharp showshowLineNumbers public void ConfigureServices(IServiceCollection services) { // Add framework services. 
@@ -104,7 +104,7 @@ var amazonS3Client = serviceProvider.GetRequiredService(); #### Standalone Approach -```csharp showLineNumbers +```csharp showshowLineNumbers var sessionOptions = new SessionOptions(); var configOptions = new ConfigOptions(); diff --git a/src/content/docs/aws/integrations/aws-sdks/php.md b/src/content/docs/aws/integrations/aws-sdks/php.md index c0e82467..80c3cbc5 100644 --- a/src/content/docs/aws/integrations/aws-sdks/php.md +++ b/src/content/docs/aws/integrations/aws-sdks/php.md @@ -15,7 +15,7 @@ which is the preferred way of integrating the PHP SDK with LocalStack. Here is an example of how to create an `S3Client` with the endpoint set to LocalStack. -```php showLineNumbers +```php showshowLineNumbers use Aws\S3\S3Client; use Aws\Exception\AwsException; diff --git a/src/content/docs/aws/integrations/aws-sdks/python-boto3.md b/src/content/docs/aws/integrations/aws-sdks/python-boto3.md index 453bf5eb..fed2ee9a 100644 --- a/src/content/docs/aws/integrations/aws-sdks/python-boto3.md +++ b/src/content/docs/aws/integrations/aws-sdks/python-boto3.md @@ -11,7 +11,7 @@ sidebar: You can easily create a `boto3` client that interacts with your LocalStack instance. The example below creates a `boto3` client that lists all available Lambda functions: -```python showLineNumbers +```python showshowLineNumbers import boto3 endpoint_url = "http://localhost.localstack.cloud:4566" diff --git a/src/content/docs/aws/integrations/aws-sdks/ruby.md b/src/content/docs/aws/integrations/aws-sdks/ruby.md index bdf34428..7a6c3594 100644 --- a/src/content/docs/aws/integrations/aws-sdks/ruby.md +++ b/src/content/docs/aws/integrations/aws-sdks/ruby.md @@ -14,7 +14,7 @@ The [AWS SDK for Ruby](https://aws.amazon.com/sdk-for-ruby/), like other AWS SDK Here is an example of how to create an S3 bucket with the AWS configuration endpoint set to LocalStack: -```ruby showLineNumbers +```ruby showshowLineNumbers require "aws-sdk-s3" # Wraps Amazon S3 bucket actions. @@ -89,7 +89,7 @@ The S3 service endpoint differs slightly from the other service endpoints becaus For alternative AWS services, you can use the following configuration: -```ruby showLineNumbers +```ruby showshowLineNumbers region = "us-east-2" Aws.config.update( endpoint: 'http://localhost:4566', # update with localstack endpoint diff --git a/src/content/docs/aws/integrations/containers/devcontainers.mdx b/src/content/docs/aws/integrations/containers/devcontainers.mdx index 08f3e981..4ba51c06 100644 --- a/src/content/docs/aws/integrations/containers/devcontainers.mdx +++ b/src/content/docs/aws/integrations/containers/devcontainers.mdx @@ -39,7 +39,7 @@ Before you start, ensure that you have the [DevContainer CLI](https://code.visua Create a JSON file called `options.json` with the desired options in it.
-```json showLineNumbers +```json showshowLineNumbers { "imageVariant": "bookworm", "awslocal": "true", @@ -337,7 +337,7 @@ To get started with LocalStack and DevContainers in VS Code, follow these steps: -```json showLineNumbers +```json showshowLineNumbers { "name": "LocalStack DooD setup", "dockerComposeFile": "docker-compose.yml", @@ -362,7 +362,7 @@ To get started with LocalStack and DevContainers in VS Code, follow these steps: ``` -```yml showLineNumbers +```yml showshowLineNumbers services: localstack: container_name: "localstack-main" @@ -413,7 +413,7 @@ FROM mcr.microsoft.com/devcontainers/base:bookworm ``` -```bash showLineNumbers +```bash showshowLineNumbers # Activate LocalStack Pro: https://docs.localstack.cloud/getting-started/auth-token/ LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:-} # required for Pro, not processed via template due to security reasons LOCALSTACK_API_KEY=${LOCALSTACK_API_KEY:-} diff --git a/src/content/docs/aws/integrations/containers/gitpod.md b/src/content/docs/aws/integrations/containers/gitpod.md index 89610df3..36ed0744 100644 --- a/src/content/docs/aws/integrations/containers/gitpod.md +++ b/src/content/docs/aws/integrations/containers/gitpod.md @@ -23,7 +23,7 @@ To configure LocalStack on Gitpod, you would need to set up a `.gitpod.yml` on t The file configures your workspace and the environment that you would like to use. You can find more information on the [Gitpod documentation](https://www.gitpod.io/docs/config-gitpod-file/). -```yaml showLineNumbers +```yaml showshowLineNumbers tasks: - name: start-localstack env: diff --git a/src/content/docs/aws/integrations/containers/kubernetes.mdx b/src/content/docs/aws/integrations/containers/kubernetes.mdx index 9af64edc..12d2741d 100644 --- a/src/content/docs/aws/integrations/containers/kubernetes.mdx +++ b/src/content/docs/aws/integrations/containers/kubernetes.mdx @@ -68,7 +68,7 @@ You can use this chart with LocalStack Pro by: You can set these values in a YAML file (in this example `pro-values.yaml`): -```yaml showLineNumbers +```yaml showshowLineNumbers image: repository: localstack/localstack-pro @@ -79,7 +79,7 @@ extraEnvVars: If you have the LocalStack Auth Token in a secret, you can also reference it directly with `extraEnvVars`: -```yaml showLineNumbers +```yaml showshowLineNumbers extraEnvVars: - name: LOCALSTACK_AUTH_TOKEN valueFrom: diff --git a/src/content/docs/aws/integrations/containers/rancher-desktop.mdx b/src/content/docs/aws/integrations/containers/rancher-desktop.mdx index 80b6faf7..774be546 100644 --- a/src/content/docs/aws/integrations/containers/rancher-desktop.mdx +++ b/src/content/docs/aws/integrations/containers/rancher-desktop.mdx @@ -123,7 +123,7 @@ Modify your Docker Compose configuration to work with Rancher Desktop: -```yml showLineNumbers +```yml showshowLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -141,7 +141,7 @@ services: ``` -```yml showLineNumbers +```yml showshowLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/integrations/continuous-integration/bitbucket.md b/src/content/docs/aws/integrations/continuous-integration/bitbucket.md index d73b2af7..375cff6d 100644 --- a/src/content/docs/aws/integrations/continuous-integration/bitbucket.md +++ b/src/content/docs/aws/integrations/continuous-integration/bitbucket.md @@ -22,7 +22,7 @@ When you want to integrate LocalStack into your job configuration, you just have The following example 
Bitbucket Pipeline configuration (`bitbucket-pipelines.yaml`) executes these steps, creates a new S3 bucket, and queries the list of S3 buckets: -```yaml showLineNumbers +```yaml showshowLineNumbers image: python:3.9 definitions: @@ -69,7 +69,7 @@ To add a CI Auth Token to your Bitbucket Pipeline: Navigate to your Bitbucket Pipeline and add the following lines to the `bitbucket-pipelines.yaml` file: -```yaml showLineNumbers +```yaml showshowLineNumbers pipelines: default: - step: diff --git a/src/content/docs/aws/integrations/continuous-integration/circleci.md b/src/content/docs/aws/integrations/continuous-integration/circleci.md index 70823967..202b5897 100644 --- a/src/content/docs/aws/integrations/continuous-integration/circleci.md +++ b/src/content/docs/aws/integrations/continuous-integration/circleci.md @@ -17,7 +17,7 @@ LocalStack supports CircleCI out of the box and can be easily integrated into yo #### Default -```yaml showLineNumbers +```yaml showshowLineNumbers version: '2.1' orbs: localstack: localstack/platform@2.2 @@ -36,7 +36,7 @@ workflows: #### Async -```yaml showLineNumbers +```yaml showshowLineNumbers version: '2.1' orbs: localstack: localstack/platform@2.2 @@ -61,7 +61,7 @@ Read more about the [configuration options](/aws/capabilities/config/configurati #### Job level -```yaml showLineNumbers +```yaml showshowLineNumbers ... jobs: localstack-test: @@ -75,7 +75,7 @@ jobs: #### Shell command -```yaml showLineNumbers +```yaml showshowLineNumbers ... jobs: localstack-test: @@ -105,7 +105,7 @@ After the above steps, just start up LocalStack using our official orb as usual. ### Dump LocalStack logs -```yaml showLineNumbers +```yaml showshowLineNumbers ... jobs: localstack-test: @@ -140,7 +140,7 @@ Update or create the Cloud Pod in its own project (i.e. in a separate Infrastruct _Note: If there is a previously created Cloud Pod which doesn't need updating, this step can be skipped._ -```yaml showLineNumbers +```yaml showshowLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -168,7 +168,7 @@ workflows: In a separate project, use the previously created base Cloud Pod as below: -```yaml showLineNumbers +```yaml showshowLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -196,7 +196,7 @@ To use a dynamically updated Cloud Pod in multiple workflows but in the same pro Before you are able to use any stored artifacts in your pipeline, you must provide either a valid [project API token](https://circleci.com/docs/managing-api-tokens/#creating-a-project-api-token) or a [personal API token](https://circleci.com/docs/managing-api-tokens/#creating-a-personal-api-token) to CircleCI. -```yaml showLineNumbers +```yaml showshowLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -275,7 +275,7 @@ Find out more about [Ephemeral Instances](/aws/capabilities/cloud-sandbox/epheme ##### Same job -```yaml showLineNumbers +```yaml showshowLineNumbers orbs: localstack: localstack/platform@2.2 ... @@ -302,7 +302,7 @@ workflows: ##### Multiple jobs -```yaml showLineNumbers +```yaml showshowLineNumbers ... jobs: setup-instance: @@ -350,7 +350,7 @@ workflows: This strategy persists LocalStack's state between jobs for the current workflow. -```yaml showLineNumbers +```yaml showshowLineNumbers ... jobs: localstack-save-state: @@ -395,7 +395,7 @@ More information about LocalStack's [state import/export](/aws/capabilities/stat To preserve state between workflow runs, you can also leverage CircleCI's caching.
This strategy will persist LocalStack's state for every workflow re-run, but not for different workflows. -```yaml showLineNumbers +```yaml showshowLineNumbers ... jobs: localstack-update-state: diff --git a/src/content/docs/aws/integrations/continuous-integration/codebuild.md b/src/content/docs/aws/integrations/continuous-integration/codebuild.md index fdb75531..c1298585 100644 --- a/src/content/docs/aws/integrations/continuous-integration/codebuild.md +++ b/src/content/docs/aws/integrations/continuous-integration/codebuild.md @@ -25,7 +25,7 @@ CodeBuild has the capability to use LocalStack's GitHub Action. #### Native Runner -```yml showLineNumbers +```yml showshowLineNumbers version: 0.2 ... phases: @@ -39,7 +39,7 @@ phases: #### GitHub Actions Runner -```yml showLineNumbers +```yml showshowLineNumbers version: 0.2 phases: @@ -60,7 +60,7 @@ Learn more about the LocalStack [config options](/aws/capabilities/config/con #### Native Runner -```yml showLineNumbers +```yml showshowLineNumbers version: 0.2 env: @@ -73,7 +73,7 @@ phases: #### GitHub Actions Runner -```yml showLineNumbers +```yml showshowLineNumbers version: 0.2 env: @@ -111,7 +111,7 @@ Navigate to the buildspec file and change the Docker image to `public.ecr.aws/lo #### Native Runner -```yaml showLineNumbers +```yaml showshowLineNumbers ... phases: pre_build: @@ -123,7 +123,7 @@ phases: #### GitHub Actions Runner -```yml showLineNumbers +```yml showshowLineNumbers ... phases: pre_build: @@ -140,7 +140,7 @@ phases: ### Dump LocalStack logs -```yaml showLineNumbers +```yaml showshowLineNumbers ... artifacts: files: @@ -174,7 +174,7 @@ Find more information about cloud pods [here](/aws/capabilities/state-management ##### Native Runner -```yml showLineNumbers +```yml showshowLineNumbers ... phases: pre_build: @@ -189,7 +189,7 @@ phases: ##### GitHub Actions Runner -```yml showLineNumbers +```yml showshowLineNumbers ... phases: pre_build: @@ -214,7 +214,7 @@ phases: #### Ephemeral Instances (Preview) -```yml showLineNumbers +```yml showshowLineNumbers ... phases: pre_build: @@ -243,7 +243,7 @@ Find out more about [ephemeral instances](/aws/capabilities/cloud-sandbox/epheme Find out more about [state management](/aws/capabilities/state-management/export-import-state/). -```yml showLineNumbers +```yml showshowLineNumbers ... phases: pre_build: @@ -259,7 +259,7 @@ artifact: Alternatively, save it as a secondary artifact: -```yml showLineNumbers +```yml showshowLineNumbers ... artifact: ... @@ -278,7 +278,7 @@ Additional information about [state export and import](/aws/capabilities/state-m ##### Native Runner -```yml showLineNumbers +```yml showshowLineNumbers ... phases: pre_build: @@ -295,7 +295,7 @@ cache: ##### GitHub Actions Runner -```yml showLineNumbers +```yml showshowLineNumbers ...
phases: pre_build: diff --git a/src/content/docs/aws/integrations/continuous-integration/github-actions.md b/src/content/docs/aws/integrations/continuous-integration/github-actions.md index e980ec64..c39e896f 100644 --- a/src/content/docs/aws/integrations/continuous-integration/github-actions.md +++ b/src/content/docs/aws/integrations/continuous-integration/github-actions.md @@ -12,7 +12,7 @@ This page contains easily customisable snippets to show you how to manage LocalS ### Start up LocalStack -```yaml showLineNumbers +```yaml showshowLineNumbers - name: Start LocalStack uses: LocalStack/setup-localstack@v0.2.2 with: @@ -25,7 +25,7 @@ This page contains easily customisable snippets to show you how to manage LocalS To set LocalStack configuration options, you can use the `configuration` input parameter. For example, to set the `DEBUG` configuration option, you can use the following configuration: -```yml showLineNumbers +```yml showshowLineNumbers - name: Start LocalStack uses: LocalStack/setup-localstack@v0.2.2 with: @@ -50,7 +50,7 @@ Click **Add secret** to save your secret. You can then use our [`setup-localstack`](https://github.com/localstack/setup-localstack) GitHub Action to start your LocalStack container, with the `LOCALSTACK_AUTH_TOKEN` environment variable: -```yaml showLineNumbers +```yaml showshowLineNumbers - name: Start LocalStack uses: LocalStack/setup-localstack@v0.2.3 with: @@ -63,7 +63,7 @@ You can then use our [`setup-localstack`](https://github.com/localstack/setup-lo ### Dump LocalStack logs -```yaml showLineNumbers +```yaml showshowLineNumbers - name: Show localstack logs run: | localstack logs | tee localstack.log @@ -75,7 +75,7 @@ You can preserve your AWS infrastructure with LocalStack in various ways. #### Cloud Pods -```yaml showLineNumbers +```yaml showshowLineNumbers ... # LocalStack is up and running already - name: Load the Cloud Pod @@ -106,7 +106,7 @@ Our GitHub Action contains the prebuilt functionality to spin up an ephemeral instance First, you need to deploy the preview: -```yaml showLineNumbers +```yaml showshowLineNumbers name: Create PR Preview on: @@ -141,7 +141,7 @@ Find out more about ephemeral instances [here](/aws/capabilities/cloud-sandbox/e #### Artifact -```yaml showLineNumbers +```yaml showshowLineNumbers ... - name: Start LocalStack and Load State uses: LocalStack/setup-localstack@v0.2.2 with: diff --git a/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md b/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md index e0556cc5..7ef9f36a 100644 --- a/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md +++ b/src/content/docs/aws/integrations/continuous-integration/gitlab-ci.md @@ -28,7 +28,7 @@ HOSTNAME_EXTERNAL: localhost.localstack.cloud. #### Service -```yaml showLineNumbers +```yaml showshowLineNumbers ... variables: DOCKER_SOCK: tcp://docker:2375 @@ -46,7 +46,7 @@ services: #### Container -```yaml showLineNumbers +```yaml showshowLineNumbers image: docker:latest stages: @@ -87,7 +87,7 @@ After you create the variable, you can use it in the `.gitlab-ci.yml` file. However, variables set in the GitLab UI are not automatically passed down to service containers. You need to assign them as variables in the UI, and then re-assign them in your `.gitlab-ci.yml`. -```yaml showLineNumbers +```yaml showshowLineNumbers ...
variables: LOCALSTACK_AUTH_TOKEN: $LOCALSTACK_AUTH_TOKEN @@ -103,7 +103,7 @@ If the CI Auth Token activation fails, the LocalStack container will exit with an er ### Dump LocalStack logs -```yaml showLineNumbers +```yaml showshowLineNumbers ... job: variables: @@ -121,7 +121,7 @@ You can preserve your AWS infrastructure with LocalStack in various ways. #### Artifact -```yaml showLineNumbers +```yaml showshowLineNumbers ... job: before_script: @@ -140,7 +140,7 @@ More info about LocalStack's state export and import [here](/aws/capabilities/st #### Cache -```yaml showLineNumbers +```yaml showshowLineNumbers ... job: before_script: @@ -163,7 +163,7 @@ Additional information about state export and import [here](/aws/capabilities/st #### Cloud Pod -```yaml showLineNumbers +```yaml showshowLineNumbers ... job: before_script: @@ -178,7 +178,7 @@ Find more information about cloud pods [here](/aws/capabilities/state-management #### Ephemeral Instance (Preview) -```yaml showLineNumbers +```yaml showshowLineNumbers ... variables: LOCALSTACK_AUTH_TOKEN: $LOCALSTACK_AUTH_TOKEN diff --git a/src/content/docs/aws/integrations/continuous-integration/travis-ci.md b/src/content/docs/aws/integrations/continuous-integration/travis-ci.md index f4fe2e12..d7fdae38 100644 --- a/src/content/docs/aws/integrations/continuous-integration/travis-ci.md +++ b/src/content/docs/aws/integrations/continuous-integration/travis-ci.md @@ -19,7 +19,7 @@ When you want to integrate LocalStack into your job configuration, you just have The following example Travis CI job config (`.travis.yaml`) executes these steps, creates a new S3 bucket, and prints a nice message at the end: -```yaml showLineNumbers +```yaml showshowLineNumbers language: python services: @@ -56,7 +56,7 @@ To configure this in Travis CI, go to the project settings (`More options` → ` Here is an example workflow: -```yaml showLineNumbers +```yaml showshowLineNumbers before_install: # Install the LocalStack CLI and awslocal - python -m pip install localstack awscli-local[ver1] diff --git a/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md b/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md index f196840a..ed42f5f7 100644 --- a/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md +++ b/src/content/docs/aws/integrations/infrastructure-as-code/cloud-custodian.md @@ -60,7 +60,7 @@ You can navigate to the LocalStack logs to verify that the EC2 instance was crea You can now create a Cloud Custodian policy to stop the EC2 instances with the tag `Custodian`.
Create a file named `custodian.yml` and add the following content: -```yaml showLineNumbers +```yaml showshowLineNumbers policies: - name: my-first-policy resource: aws.ec2 diff --git a/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md b/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md index 51cdfd73..9c4c0cf1 100644 --- a/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md +++ b/src/content/docs/aws/integrations/infrastructure-as-code/crossplane.md @@ -116,7 +116,7 @@ EOF ``` Finally, we create an AWS `ProviderConfig` that references the secret created above, and defines a static `endpoint` pointing to the LocalStack URL `http://host.docker.internal:4566`: -```bash showLineNumbers +```bash showshowLineNumbers cat < -```python showLineNumbers +```python showshowLineNumbers AWS_CONFIG = { "region": "us-east-1", "endpoints": [ @@ -292,7 +292,7 @@ AWS_CONFIG = { ``` -```javascript showLineNumbers +```javascript showshowLineNumbers export const AWS_CONFIG = { region: "us-east-1", endpoints: [ @@ -331,7 +331,7 @@ You can further import the above configuration in your project's code, and use i -```python showLineNumbers +```python showshowLineNumbers ... from localstack_config import AWS_CONFIG ... @@ -340,7 +340,7 @@ AwsProvider(self, "Aws", **AWS_CONFIG) ``` -```javascript showLineNumbers +```javascript showshowLineNumbers ... import { AWS_CONFIG } from "./localstack-config"; ... @@ -372,7 +372,7 @@ Create a new directory named `cdktf-localstack` and initialize a new CDKTF proje -```python showLineNumbers +```python showshowLineNumbers $ cdktf init ... ? Do you want to continue with Terraform Cloud remote state management? No @@ -389,7 +389,7 @@ Note: You can always add providers using 'cdktf provider add' later on ``` -```javascript showLineNumbers +```javascript showshowLineNumbers $ cdktf init ... ? Do you want to continue with Terraform Cloud remote state management? No @@ -426,7 +426,7 @@ Add the following code to import the AWS provider and create a new S3 bucket in -```python showLineNumbers +```python showshowLineNumbers # !/usr/bin/env python from constructs import Construct @@ -458,7 +458,7 @@ app.synth() ``` -```javascript showLineNumbers +```javascript showshowLineNumbers import { Construct } from "constructs"; import { App, TerraformStack } from "cdktf"; import { AwsProvider } from "@cdktf/provider-aws/lib/provider"; @@ -498,7 +498,7 @@ cdktf synth && cdktf deploy You should see the following output: -```sh showLineNumbers +```sh showshowLineNumbers example Initializing the backend... example Successfully configured the backend "local"! Terraform will automatically @@ -543,7 +543,7 @@ Terragrunt is an open-source wrapper for Terraform that provides extra tools for A sample `terragrunt.hcl` configuration file to use with LocalStack is shown below: -```hcl showLineNumbers +```hcl showshowLineNumbers generate "provider" { path = "provider.tf" if_exists = "overwrite_terragrunt" diff --git a/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md b/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md index 50af5dd1..d95ca8b4 100644 --- a/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md +++ b/src/content/docs/aws/integrations/messaging/selfmanaged-kafka-cluster.md @@ -16,13 +16,13 @@ You can find the [example Docker Compose](docker-compose.yml) file which contain 1. Run Docker Compose: -```bash showLineNumbers +```bash showshowLineNumbers docker-compose up -d ``` 2. 
Create the Lambda function: -```bash showLineNumbers +```bash showshowLineNumbers awslocal lambda create-function \ --function-name fun1 \ --handler lambda.handler \ @@ -54,7 +54,7 @@ awslocal lambda create-function \ 3. Create an example secret: -```bash showLineNumbers +```bash showshowLineNumbers awslocal secretsmanager create-secret --name localstack { "ARN": "arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI", @@ -65,14 +65,14 @@ awslocal secretsmanager create-secret --name localstack 4. Create an example Kafka topic: -```bash showLineNumbers +```bash showshowLineNumbers docker exec -ti kafka kafka-topics --zookeeper zookeeper:2181 --create --replication-factor 1 --partitions 1 --topic t1 Created topic t1. ``` 5. Create the event source mapping to your local kafka cluster: -```bash showLineNumbers +```bash showshowLineNumbers awslocal lambda create-event-source-mapping \ --topics t1 \ --source-access-configuration Type=SASL_SCRAM_512_AUTH,URI=arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI \ diff --git a/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md b/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md index d6a59013..b402fd38 100644 --- a/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md +++ b/src/content/docs/aws/integrations/testing/lambdatest-hyperexecute.md @@ -28,7 +28,7 @@ To get started with HyperExecute, you need to fulfill the following prerequisite Create a new file named `he.yml` in the root directory of your project and add the following content: -```yaml showLineNumbers +```yaml showshowLineNumbers version: "0.1" runson: linux autosplit: true @@ -58,7 +58,7 @@ Subsequently, you need to add your LocalStack Auth Token to your HyperExecute Po To enable test execution on HyperExecute, you need to add the following content to your GitHub Actions workflow file: -```yaml showLineNumbers +```yaml showshowLineNumbers version: "0.1" runson: linux ... @@ -98,7 +98,7 @@ You can find your access key in the HyperExecute Portal. In this example, we will use GitHub Actions to run the tests in the CI pipeline. 
To do so, you need to add the following content to your GitHub Actions workflow file in `.github/workflows/main.yml`: -```yaml showLineNumbers +```yaml showshowLineNumbers name: Running tests on HyperExecute on: diff --git a/src/content/docs/aws/integrations/testing/testcontainers.mdx b/src/content/docs/aws/integrations/testing/testcontainers.mdx index 8086d679..8896b295 100644 --- a/src/content/docs/aws/integrations/testing/testcontainers.mdx +++ b/src/content/docs/aws/integrations/testing/testcontainers.mdx @@ -39,7 +39,7 @@ go get github.com/testcontainers/testcontainers-go/modules/localstack ``` -```java showLineNumbers +```java showshowLineNumbers org.testcontainers localstack @@ -64,7 +64,7 @@ npm i @testcontainers/localstack -```csharp showLineNumbers +```csharp showshowLineNumbers var localStackContainer = new LocalStackBuilder().Build(); await localStackContainer.StartAsync() @@ -92,14 +92,14 @@ const localstack = new LocalstackContainer("localstack/localstack:3").start() -```csharp showLineNumbers +```csharp showshowLineNumbers var config = new AmazonS3Config(); config.ServiceURL = localStackContainer.GetConnectionString(); using var client = new AmazonS3Client(config); ``` -```go showLineNumbers +```go showshowLineNumbers func s3Client(ctx context.Context, l *localstack.LocalStackContainer) (*s3.Client, error) { // the Testcontainers Docker provider is used to get the host of the Docker daemon provider, err := testcontainers.NewDockerProvider() @@ -144,7 +144,7 @@ func s3Client(ctx context.Context, l *localstack.LocalStackContainer) (*s3.Clien ``` -```java showLineNumbers +```java showshowLineNumbers S3Client s3 = S3Client.builder() .endpointOverride(localstack.getEndpoint()) .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(localstack.getAccessKey(), localstack.getSecretKey()))) @@ -153,7 +153,7 @@ S3Client s3 = S3Client.builder() ``` -```typescript showLineNumbers +```typescript showshowLineNumbers const awsConfig = { endpoint: localstack.getConnectionUri(), credentials: { diff --git a/src/content/docs/aws/services/account.mdx b/src/content/docs/aws/services/account.mdx index e30d9569..26bf9f1b 100644 --- a/src/content/docs/aws/services/account.mdx +++ b/src/content/docs/aws/services/account.mdx @@ -33,7 +33,7 @@ We will demonstrate how to put contact information, fetch account details, and a You can use the [`PutContactInformation`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutContactInformation.html) API to add or update the contact information for your AWS account. Run the following command to add contact information to your account: -```bash showLineNumbers +```bash showshowLineNumbers awslocal account put-contact-information \ --contact-information '{ "FullName": "Jane Doe", @@ -55,7 +55,7 @@ Run the following command to fetch the contact information for your account: awslocal account get-contact-information ``` -```bash title="Output" showLineNumbers +```bash title="Output" showshowLineNumbers { "ContactInformation": { "AddressLine1": "XXXX Main St", @@ -74,7 +74,7 @@ awslocal account get-contact-information You can attach an alternate contact using [`PutAlternateContact`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutAlternateContact.html) API. 
Run the following command to attach an alternate contact to your account: -```bash showLineNumbers +```bash showshowLineNumbers awslocal account put-alternate-contact \ --alternate-contact-type "BILLING" \ --email-address "bill@ing.com" \ diff --git a/src/content/docs/aws/services/acm.mdx b/src/content/docs/aws/services/acm.mdx index b6d84c20..3945cad0 100644 --- a/src/content/docs/aws/services/acm.mdx +++ b/src/content/docs/aws/services/acm.mdx @@ -27,7 +27,7 @@ Start your LocalStack container using your preferred method, then use the [Reque Specify the domain name you want to request the certificate for, and any additional options you need. Here's an example command: -```bash showLineNumbers +```bash showshowLineNumbers awslocal acm request-certificate \ --domain-name www.example.com \ --validation-method DNS \ diff --git a/src/content/docs/aws/services/apacheflink.mdx b/src/content/docs/aws/services/apacheflink.mdx index bb272d08..0806bc41 100644 --- a/src/content/docs/aws/services/apacheflink.mdx +++ b/src/content/docs/aws/services/apacheflink.mdx @@ -80,7 +80,7 @@ Without the proper permissions policy and role, this example application will no Create an IAM role for the running MSF application to assume. -```json showLineNumbers +```json showshowLineNumbers # role.json { "Version": "2012-10-17", @@ -100,7 +100,7 @@ awslocal iam create-role --role-name msaf-role --assume-role-policy-document fil Next, add a permissions policy to this role that permits read and write access to S3. -```json showLineNumbers +```json showshowLineNumbers # policy.json { "Version": "2012-10-17", @@ -124,7 +124,7 @@ Now, when the running MSF application assumes this role, it will have the necess With all prerequisite resources in place, the Flink application can now be created and started. -```bash showLineNumbers +```bash showshowLineNumbers awslocal kinesisanalyticsv2 create-application \ --application-name msaf-app \ --runtime-environment FLINK-1_20 \ @@ -171,7 +171,7 @@ The following prerequisites apply to CloudWatch Logs integration: To add a logging option: -```bash showLineNumbers +```bash showshowLineNumbers awslocal kinesisanalyticsv2 add-application-cloud-watch-logging-option \ --application-name msaf-app \ --cloud-watch-logging-option '{"LogStreamARN": "arn:aws:logs:us-east-1:000000000000:log-group:msaf-log-group:log-stream:msaf-log-stream"}' diff --git a/src/content/docs/aws/services/apigateway.mdx b/src/content/docs/aws/services/apigateway.mdx index f114ca6c..02c4f3a8 100644 --- a/src/content/docs/aws/services/apigateway.mdx +++ b/src/content/docs/aws/services/apigateway.mdx @@ -30,7 +30,7 @@ The Lambda function will be invoked with a `GET` request and return a response w Create a new file named `lambda.js` with the following contents: -```javascript showLineNumbers +```javascript showshowLineNumbers 'use strict' const apiHandler = (payload, context, callback) => { @@ -51,7 +51,7 @@ The above code defines a function named `apiHandler` that returns a response wit Zip the file and upload it to LocalStack using the `awslocal` CLI. Run the following command: -```bash showLineNumbers +```bash showshowLineNumbers zip function.zip lambda.js awslocal lambda create-function \ --function-name apigw-lambda \ @@ -120,7 +120,7 @@ You'll need this ID for the next step. Create a new resource for the API using the [`CreateResource`](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateResource.html) API.
Use the ID of the resource returned in the previous step as the parent ID: -```bash showLineNumbers +```bash showshowLineNumbers awslocal apigateway create-resource \ --rest-api-id \ --parent-id \ @@ -144,7 +144,7 @@ You'll need this Resource ID for the next step. Add a `GET` method to the resource using the [`PutMethod`](https://docs.aws.amazon.com/apigateway/latest/api/API_PutMethod.html) API. Use the ID of the resource returned in the previous step as the Resource ID: -```bash showLineNumbers +```bash showshowLineNumbers awslocal apigateway put-method \ --rest-api-id \ --resource-id \ @@ -166,7 +166,7 @@ awslocal apigateway put-method \ Now, create a new integration for the method using the [`PutIntegration`](https://docs.aws.amazon.com/apigateway/latest/api/API_PutIntegration.html) API. -```bash showLineNumbers +```bash showshowLineNumbers awslocal apigateway put-integration \ --rest-api-id \ --resource-id \ @@ -256,7 +256,7 @@ LocalStack provides additional features and functionality on top of the official To demonstrate how to access APIs through LocalStack's local domain name, consider the following Serverless configuration that shows two Lambda functions (`serviceV1` and `serviceV2`) that are connected to an API Gateway v1 (`http` event) and an API Gateway v2 endpoint (`httpApi` event), respectively: -```yaml showLineNumbers +```yaml showshowLineNumbers ... plugins: - serverless-localstack @@ -332,7 +332,7 @@ http://localhost:4566/restapis///_user_request_/ WebSocket APIs provide real-time communication channels between a client and a server. To use WebSockets in LocalStack, you can define a WebSocket route in your Serverless configuration: -```yaml showLineNumbers +```yaml showshowLineNumbers ... plugins: - serverless-localstack @@ -403,7 +403,7 @@ awslocal apigateway create-rest-api --name my-api --tags '{"_custom_id_":"myid12 You can also configure the protocol type, the possible values being `HTTP` and `WEBSOCKET`: -```bash showLineNumbers +```bash showshowLineNumbers awslocal apigatewayv2 create-api \ --name=my-api \ --protocol-type=HTTP --tags="_custom_id_=my-api" diff --git a/src/content/docs/aws/services/appautoscaling.mdx b/src/content/docs/aws/services/appautoscaling.mdx index 8ba59cb2..a035f1fa 100644 --- a/src/content/docs/aws/services/appautoscaling.mdx +++ b/src/content/docs/aws/services/appautoscaling.mdx @@ -28,7 +28,7 @@ We will demonstrate how you can configure auto scaling to handle a heavy workloa To create a new Lambda function, create a new file called `index.js` with the following code: -```js showLineNumbers +```js showshowLineNumbers exports.handler = async (event, context) => { console.log('Hello from Lambda!'); return { diff --git a/src/content/docs/aws/services/appconfig.mdx b/src/content/docs/aws/services/appconfig.mdx index 26644b3a..8afc8b70 100644 --- a/src/content/docs/aws/services/appconfig.mdx +++ b/src/content/docs/aws/services/appconfig.mdx @@ -95,7 +95,7 @@ The following output would be retrieved: You can now create a JSON file to add your feature flag configuration data. 
Create a file named `feature-flag-config.json` with the following content:

-```json
+```json showLineNumbers
{
  "allow_mobile_payments": {
    "enabled": false
diff --git a/src/content/docs/aws/services/appsync.mdx b/src/content/docs/aws/services/appsync.mdx
index dadbfc57..7a69dbfe 100644
--- a/src/content/docs/aws/services/appsync.mdx
+++ b/src/content/docs/aws/services/appsync.mdx
@@ -98,7 +98,7 @@ awslocal appsync create-api-key \

Create a file named `schema.graphql` with the following content:

-```graphql
+```graphql showLineNumbers
type Note {
  NoteId: ID!
  title: String
diff --git a/src/content/docs/aws/services/athena.mdx b/src/content/docs/aws/services/athena.mdx
index f55a1104..e4c48830 100644
--- a/src/content/docs/aws/services/athena.mdx
+++ b/src/content/docs/aws/services/athena.mdx
@@ -235,7 +235,7 @@ print(cursor.fetchall())

-```python
+```python showLineNumbers
import awswrangler as wr
import pandas as pd

diff --git a/src/content/docs/aws/services/backup.mdx b/src/content/docs/aws/services/backup.mdx
index 7357533a..6c68b0f6 100644
--- a/src/content/docs/aws/services/backup.mdx
+++ b/src/content/docs/aws/services/backup.mdx
@@ -47,7 +47,7 @@ awslocal backup create-backup-vault \
You can create a backup plan which specifies the backup vault to store the backups in and the schedule for creating backups.
You can specify the backup plan in a `backup-plan.json` file:

-```json
+```json showLineNumbers
{
    "BackupPlanName": "testplan",
    "Rules": [{
@@ -91,7 +91,7 @@ awslocal backup create-backup-plan \
You can create a backup selection which specifies the resources to back up and the backup plan to associate with.
You can specify the backup selection in a `backup-selection.json` file:

-```json
+```json showLineNumbers
{
    "SelectionName": "Myselection",
    "IamRoleArn": "arn:aws:iam::000000000000:role/service-role/AWSBackupDefaultServiceRole",
diff --git a/src/content/docs/aws/services/cloudformation.mdx b/src/content/docs/aws/services/cloudformation.mdx
index a62755da..455ae74a 100644
--- a/src/content/docs/aws/services/cloudformation.mdx
+++ b/src/content/docs/aws/services/cloudformation.mdx
@@ -34,7 +34,7 @@ Use the following code snippet and save the content in either `cfn-quickstart-st

-```yaml
+```yaml showLineNumbers
Resources:
  LocalBucket:
    Type: AWS::S3::Bucket
@@ -43,7 +43,7 @@ Resources:
```

-```json
+```json showLineNumbers
{
  "Resources": {
    "LocalBucket": {
diff --git a/src/content/docs/aws/services/codebuild.mdx b/src/content/docs/aws/services/codebuild.mdx
index 28ab21fa..741c1617 100644
--- a/src/content/docs/aws/services/codebuild.mdx
+++ b/src/content/docs/aws/services/codebuild.mdx
@@ -47,7 +47,7 @@ Let us walk through these files.
It does nothing more than print a salutation message.
Create a `MessageUtil.java` file and save it into the `src/main/java` directory.

-```java
+```java showLineNumbers
public class MessageUtil {
  private String message;

@@ -71,7 +71,7 @@ public class MessageUtil {

Every build needs to be tested.
Therefore, create the `TestMessageUtil.java` file in the `src/test/java` directory.

-```java
+```java showLineNumbers
import org.junit.Test;
import org.junit.Ignore;
import static org.junit.Assert.assertEquals;
@@ -101,7 +101,7 @@ This small suite simply verifies that the greeting message is built correctly.
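Once the `pom.xml` described next is in place, an optional local sanity check is to run the suite yourself before handing the project to CodeBuild. A minimal sketch, assuming Maven and a JDK are installed on your machine:

```bash
# Compile the sources and run the JUnit tests locally.
mvn -q test
```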
Finally, we need a `pom.xml` file to instruct Maven about what to build and which artifact needs to be produced.
Create this file at the root of your directory.

-```xml
+```xml showLineNumbers

@@ -140,7 +140,7 @@ A `buildspec` file is a collection of settings and commands, specified in YAML f

Create this `buildspec.yml` file in the root directory.

-```yaml
+```yaml showLineNumbers
version: 0.2

phases:
@@ -200,7 +200,7 @@ awslocal s3 cp MessageUtil.zip s3://codebuild-demo-input

To work properly, AWS CodeBuild needs access to other AWS services, e.g., to retrieve the source code from an S3 bucket.
Create a `create-role.json` file with the following content:

-```json
+```json showLineNumbers
{
  "Version": "2012-10-17",
  "Statement": [
@@ -227,7 +227,7 @@ it will be needed to create the CodeBuild project later on.
Let us now define a policy for the created role.
Create a `put-role-policy.json` file with the following content:

-```json
+```json showLineNumbers
{
  "Version": "2012-10-17",
  "Statement": [
@@ -302,7 +302,7 @@ awslocal codebuild create-project --generate-cli-skeleton
From the generated file, change the source and the artifact location to match the S3 bucket names you just created.
Similarly, fill in the ARN of the CodeBuild service role.

-```json {hl_lines=[5,9,16]}
+```json {hl_lines=[5,9,16]} showLineNumbers
{
  "name": "codebuild-demo-project",
  "source": {
diff --git a/src/content/docs/aws/services/codepipeline.mdx b/src/content/docs/aws/services/codepipeline.mdx
index 78b290c2..7403ca24 100644
--- a/src/content/docs/aws/services/codepipeline.mdx
+++ b/src/content/docs/aws/services/codepipeline.mdx
@@ -61,7 +61,7 @@ This requires a properly configured IAM role that our pipeline can assume.

Create the role and make note of the role ARN:

-```json
+```json showLineNumbers
# role.json
{
  "Version": "2012-10-17",
@@ -85,7 +85,7 @@ awslocal iam create-role --role-name role --assume-role-policy-document file://r

Now add a permissions policy to this role that permits read and write access to S3.

-```json
+```json showLineNumbers
# policy.json
{
  "Version": "2012-10-17",
@@ -121,7 +121,7 @@ This is a deploy action which uploads the file to the target bucket.
Pay special attention to `roleArn`, `artifactStore.location` as well as `S3Bucket`, `S3ObjectKey`, and `BucketName`.
These correspond to the resources we created earlier.

-```json {hl_lines=[6,9,26,27,52]}
+```json {hl_lines=[6,9,26,27,52]} showLineNumbers
# declaration.json
{
  "name": "pipeline",
diff --git a/src/content/docs/aws/services/cognito.mdx b/src/content/docs/aws/services/cognito.mdx
index 4689c25f..bfc1ba2c 100644
--- a/src/content/docs/aws/services/cognito.mdx
+++ b/src/content/docs/aws/services/cognito.mdx
@@ -237,7 +237,7 @@ Cognito offers a variety of lifecycle hooks called Cognito Lambda triggers, whic
To illustrate, suppose you wish to define a _user migration_ Lambda trigger in order to migrate users from your existing user directory into Amazon Cognito user pools at sign-in.
In this case, you can start by creating a Lambda function named `"migrate_users"` that performs the migration. Create a new file `index.js` with the following code:

-```javascript
+```javascript showLineNumbers
const validUsers = {
  belladonna: { password: "12345678Aa!", emailAddress: "bella@example.com" },
};
@@ -379,7 +379,7 @@ awslocal cognito-idp create-resource-server \

You can retrieve the token from your application using the specified endpoint: `http://cognito-idp.localhost.localstack.cloud:4566/_aws/cognito-idp/oauth2/token`.

-```javascript
+```javascript showLineNumbers
require('dotenv').config();
const axios = require('axios');

@@ -419,7 +419,7 @@ Furthermore, you have the option to combine Cognito and LocalStack seamlessly wi

For instance, consider this snippet from a `serverless.yml` configuration:

-```yaml
+```yaml showLineNumbers
service: test

plugins:
diff --git a/src/content/docs/aws/services/docdb.mdx b/src/content/docs/aws/services/docdb.mdx
index 1ddd9433..ae3c961d 100644
--- a/src/content/docs/aws/services/docdb.mdx
+++ b/src/content/docs/aws/services/docdb.mdx
@@ -249,7 +249,7 @@ npm install mongodb@6.3.0

Next, copy the following code into a new file named `index.js` in the `resources` folder:

-```javascript
+```javascript showLineNumbers
const AWS = require('aws-sdk');
const RDS = AWS.RDS;
const { MongoClient } = require('mongodb');
@@ -340,7 +340,7 @@ Secrets follow a [well-defined pattern](https://docs.aws.amazon.com/secretsmanag
For the Lambda function, you can pass the secret ARN as `SECRET_NAME`.
In the Lambda, you can then retrieve the secret details like this:

-```javascript
+```javascript showLineNumbers
const AWS = require('aws-sdk');
const { MongoClient } = require('mongodb');

diff --git a/src/content/docs/aws/services/dynamodbstreams.mdx b/src/content/docs/aws/services/dynamodbstreams.mdx
index 630f98e8..7ed39db8 100644
--- a/src/content/docs/aws/services/dynamodbstreams.mdx
+++ b/src/content/docs/aws/services/dynamodbstreams.mdx
@@ -55,7 +55,7 @@ You can notice that in the `LatestStreamArn` field of the response:
You can now create a Lambda function (`publishNewBark`) to process stream records from `BarkTable`.
Create a new file named `index.js` with the following code:

-```javascript
+```javascript showLineNumbers
'use strict';
var AWS = require("aws-sdk");

@@ -98,7 +98,7 @@ awslocal lambda create-function \
To test the Lambda function, you can invoke it using the [`Invoke`](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) API.
Create a new file named `payload.json` with the following content:

-```json
+```json showLineNumbers
{
  "Records": [
    {
diff --git a/src/content/docs/aws/services/ecs.mdx b/src/content/docs/aws/services/ecs.mdx
index d5a83fd9..8e926222 100644
--- a/src/content/docs/aws/services/ecs.mdx
+++ b/src/content/docs/aws/services/ecs.mdx
@@ -61,7 +61,7 @@ awslocal ecs create-cluster --cluster-name mycluster

Containers within tasks are defined by a task definition that is managed outside of the context of a cluster.
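If you want to verify registrations as you go, a listing along these lines shows the task definitions known to LocalStack (the list stays empty until the next step completes):

```bash
# Show all registered task definition ARNs.
awslocal ecs list-task-definitions
```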
To create a task definition that runs an `ubuntu` container forever (by running an infinite loop printing "Running" on startup), create the following file as `task_definition.json`:

-```json
+```json showLineNumbers
{
  "containerDefinitions": [
    {
@@ -296,7 +296,7 @@ ecs_client.register_task_definition(

The same functionality can be achieved with the AWS CDK following this (Python) example:

-```python
+```python showLineNumbers
task_definition = ecs.TaskDefinition(
  ...
  volumes=[
@@ -322,7 +322,7 @@ Your file paths might differ, so check Docker's documentation on [Environment Va

Here is a Docker Compose example:

-```yaml
+```yaml showLineNumbers
services:
  localstack:
    container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}"
diff --git a/src/content/docs/aws/services/eks.mdx b/src/content/docs/aws/services/eks.mdx
index cb119fe6..7cb226c7 100644
--- a/src/content/docs/aws/services/eks.mdx
+++ b/src/content/docs/aws/services/eks.mdx
@@ -307,7 +307,7 @@ To enable HTTPS for your endpoints, you can configure Kubernetes to use SSL/TLS

The local EKS cluster comes pre-configured with a secret named `ls-secret-tls`, which can be conveniently utilized to define the `tls` section in the ingress configuration:

-```yaml
+```yaml showLineNumbers
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
@@ -425,7 +425,7 @@ In such cases, path-based routing may not be ideal if you need the services to b

To address this requirement, we recommend utilizing host-based routing rules, as demonstrated in the example below:

-```bash
+```bash showLineNumbers
cat < {
diff --git a/src/content/docs/aws/services/fis.mdx b/src/content/docs/aws/services/fis.mdx
index 04188559..998c555b 100644
--- a/src/content/docs/aws/services/fis.mdx
+++ b/src/content/docs/aws/services/fis.mdx
@@ -53,7 +53,7 @@ We will demonstrate how to create an experiment that stops EC2 instances.

Create a new file named `create-experiment.json`.
This file should contain a JSON configuration that will be utilized during the subsequent invocation of the [`CreateExperimentTemplate`](https://docs.aws.amazon.com/fis/latest/APIReference/API_CreateExperimentTemplate.html) API.

-```json
+```json showLineNumbers
{
  "actions": {
    "StopInstance": {
diff --git a/src/content/docs/aws/services/glue.mdx b/src/content/docs/aws/services/glue.mdx
index 8eead869..ebc3cafb 100644
--- a/src/content/docs/aws/services/glue.mdx
+++ b/src/content/docs/aws/services/glue.mdx
@@ -67,7 +67,7 @@ awslocal glue get-tables --database db1

Create a new PySpark script named `job.py` with the following code:

-```python
+```python showLineNumbers
from pyspark.sql import SparkSession

def init_spark():
diff --git a/src/content/docs/aws/services/iot.mdx b/src/content/docs/aws/services/iot.mdx
index 22658a5b..f383cb51 100644
--- a/src/content/docs/aws/services/iot.mdx
+++ b/src/content/docs/aws/services/iot.mdx
@@ -79,7 +79,7 @@ AWS provides its root CA certificate at [`https://www.amazontrust.com/repository
When connecting to the endpoints, you will need to provide this root CA certificate for authentication.
This is illustrated below with the Python [AWS IoT SDK](https://docs.aws.amazon.com/iot/latest/developerguide/iot-sdks.html):

-```py
+```py showLineNumbers
import awscrt
import boto3
from awsiot import mqtt_connection_builder
@@ -127,7 +127,7 @@ For details on how ALPN works with AWS, see [this page](https://docs.aws.amazon.
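To find out which endpoint your clients should target, one option is a lookup like the following; the endpoint type is illustrative, and other types such as `iot:CredentialProvider` exist as well:

```bash
# Retrieve the data endpoint for the local IoT broker.
awslocal iot describe-endpoint --endpoint-type iot:Data-ATS
```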
The client certificate and key can be retrieved using the `CreateKeysAndCertificate` operation.
The certificate is signed by the LocalStack root CA.

-```py
+```py showLineNumbers
result = iot_client.create_keys_and_certificate(setAsActive=True)

# Path to file with saved content `result["certificatePem"]`
diff --git a/src/content/docs/aws/services/iotanalytics.mdx b/src/content/docs/aws/services/iotanalytics.mdx
index 33459454..9e0edb5f 100644
--- a/src/content/docs/aws/services/iotanalytics.mdx
+++ b/src/content/docs/aws/services/iotanalytics.mdx
@@ -85,7 +85,7 @@ awslocal iotanalytics create-pipeline --cli-input-json file://mypipeline.json

The `mypipeline.json` file contains the following content:

-```json
+```json showLineNumbers
{
    "pipelineName": "mypipeline",
    "pipelineActivities": [
diff --git a/src/content/docs/aws/services/iotwireless.mdx b/src/content/docs/aws/services/iotwireless.mdx
index d972f512..ec6545a6 100644
--- a/src/content/docs/aws/services/iotwireless.mdx
+++ b/src/content/docs/aws/services/iotwireless.mdx
@@ -69,7 +69,7 @@ awslocal iotwireless create-wireless-device \

The `input.json` file contains the following content:

-```json title="input.json"
+```json title="input.json" showLineNumbers
{
    "Description": "My LoRaWAN wireless device",
    "DestinationName": "IoTWirelessDestination",
diff --git a/src/content/docs/aws/services/kinesis.mdx b/src/content/docs/aws/services/kinesis.mdx
index 10eed089..6286f341 100644
--- a/src/content/docs/aws/services/kinesis.mdx
+++ b/src/content/docs/aws/services/kinesis.mdx
@@ -29,7 +29,7 @@ We will demonstrate how to create a Lambda function to consume events from a Kin
You need to create a Lambda function that receives a Kinesis event input and processes the messages that it contains.
Create a file named `index.mjs` with the following content:

-```javascript
+```javascript showLineNumbers
console.log('Loading function');

export const handler = (event, context) => {
diff --git a/src/content/docs/aws/services/lakeformation.mdx b/src/content/docs/aws/services/lakeformation.mdx
index 753a7d8f..d9f6c20d 100644
--- a/src/content/docs/aws/services/lakeformation.mdx
+++ b/src/content/docs/aws/services/lakeformation.mdx
@@ -71,7 +71,7 @@ awslocal lakeformation list-resources

You can grant permissions to a user or group using the [`GrantPermissions`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_GrantPermissions.html) API.
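Before granting anything, it can help to inspect the permissions that already exist. A possible check, with filters omitted for brevity:

```bash
# List the currently granted Lake Formation permissions.
awslocal lakeformation list-permissions
```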
Create a file named `permissions.json` with the following content:

-```json
+```json showLineNumbers
{
    "CatalogId": "000000000000",
    "Principal": {
diff --git a/src/content/docs/aws/services/lambda.mdx b/src/content/docs/aws/services/lambda.mdx
index 767c8deb..6d6729e6 100644
--- a/src/content/docs/aws/services/lambda.mdx
+++ b/src/content/docs/aws/services/lambda.mdx
@@ -30,7 +30,7 @@ With the Function URL property, you can call a Lambda Function via an HTTP API c

To create a new Lambda function, create a new file called `index.js` with the following code:

-```javascript
+```javascript showLineNumbers
exports.handler = async (event) => {
    let body = JSON.parse(event.body)
    const product = body.num1 * body.num2;
diff --git a/src/content/docs/aws/services/managedblockchain.mdx b/src/content/docs/aws/services/managedblockchain.mdx
index f77ef014..af051282 100644
--- a/src/content/docs/aws/services/managedblockchain.mdx
+++ b/src/content/docs/aws/services/managedblockchain.mdx
@@ -26,7 +26,7 @@ We will demonstrate how to create a blockchain network, a node, and a proposal.
You can create a blockchain network using the [`CreateNetwork`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNetwork.html) API.
Run the following command to create a network named `OurBlockchainNet` which uses Hyperledger Fabric with the following configuration:

-```bash
+```bash showLineNumbers
awslocal managedblockchain create-network \
    --cli-input-json '{
        "Name": "OurBlockchainNet",
@@ -81,7 +81,7 @@ Copy the `NetworkId` and `MemberId` values from the output of the above command,

You can create a node using the [`CreateNode`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNode.html) API.
Run the following command to create a node with the following configuration:

-```bash
+```bash showLineNumbers
awslocal managedblockchain create-node \
    --node-configuration '{
        "InstanceType": "bc.t3.small",
diff --git a/src/content/docs/aws/services/neptune.mdx b/src/content/docs/aws/services/neptune.mdx
index 02556448..90225215 100644
--- a/src/content/docs/aws/services/neptune.mdx
+++ b/src/content/docs/aws/services/neptune.mdx
@@ -91,7 +91,7 @@ To start a connection you have to use the `ws` protocol.

Here is an example that uses Python and [`gremlinpython`](https://pypi.org/project/gremlinpython/) to connect to the database:

-```python
+```python showLineNumbers
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.traversal import Bindings, T, gt
@@ -279,7 +279,7 @@ This feature is in beta and any feedback is appreciated.

Here is an example of how to use the `GraphSONSerializersV3d0` serializer with `gremlinpython==3.6.2`:

-```python
+```python showLineNumbers
from gremlin_python.driver import serializer
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.anonymous_traversal import traversal
diff --git a/src/content/docs/aws/services/opensearch.mdx b/src/content/docs/aws/services/opensearch.mdx
index 7992f962..449ab5a4 100644
--- a/src/content/docs/aws/services/opensearch.mdx
+++ b/src/content/docs/aws/services/opensearch.mdx
@@ -171,7 +171,7 @@ IAM support is also not yet available.

A secure OpenSearch domain can be spawned with this example CLI input.
Save it in a file named `opensearch_domain.json`.

-```json title="opensearch_domain.json"
+```json title="opensearch_domain.json" showLineNumbers
{
  "DomainName": "secure-domain",
  "ClusterConfig": {
@@ -284,7 +284,7 @@ It's important to bear in mind that only a single backend configuration is possi

Here is a sample `docker-compose.yaml` file that contains a single-node OpenSearch cluster and a basic LocalStack setup.

-```yaml
+```yaml showLineNumbers
services:
  opensearch:
    container_name: opensearch
diff --git a/src/content/docs/aws/services/rds.mdx b/src/content/docs/aws/services/rds.mdx
index a917c0fa..381b23c4 100644
--- a/src/content/docs/aws/services/rds.mdx
+++ b/src/content/docs/aws/services/rds.mdx
@@ -329,7 +329,7 @@ The [`aws_s3` extension](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/
In the SQL code snippet below, we are loading the `aws_s3` extension, then using the `table_import_from_s3(..)` function to populate the data in a table `table1` from a CSV file `test.csv` stored in a local S3 bucket `mybucket1`:

-```sql lineNumbers
+```sql showLineNumbers
CREATE EXTENSION IF NOT EXISTS aws_s3 CASCADE;
SELECT aws_s3.table_import_from_s3(
   'table1', 'c1, c2, c3', '(format csv)',
@@ -339,7 +339,7 @@ SELECT aws_s3.table_import_from_s3(

Analogously, we can use the `query_export_to_s3(..)` extension function to export data from a table `table2` into a CSV file `test.csv` in a local S3 bucket `mybucket2`:

-```sql lineNumbers
+```sql showLineNumbers
CREATE EXTENSION IF NOT EXISTS aws_s3 CASCADE;
SELECT aws_s3.query_export_to_s3(
   'SELECT * FROM table2',
diff --git a/src/content/docs/aws/services/s3.mdx b/src/content/docs/aws/services/s3.mdx
index 2715d0ee..13fa4ac1 100644
--- a/src/content/docs/aws/services/s3.mdx
+++ b/src/content/docs/aws/services/s3.mdx
@@ -181,7 +181,7 @@ awslocal s3api create-bucket --bucket cors-bucket

Next, create a JSON file with the CORS configuration.
The file should have the following format:

-```json title="cors-config.json" lineNumbers
+```json title="cors-config.json" showLineNumbers
{
  "CORSRules": [
    {
@@ -217,7 +217,7 @@ Your S3 bucket is configured to allow cross-origin resource sharing, and if you
However, if you try to access your bucket from the [LocalStack Web Application](https://app.localstack.cloud), you'll see errors, and your bucket won't be accessible anymore.
We can edit the JSON file `cors-config.json` you created earlier with the following configuration and save it:

-```json title="cors-config.json" lineNumbers
+```json title="cors-config.json" showLineNumbers
{
  "CORSRules": [
    {
@@ -263,7 +263,7 @@ IMAGE_NAME=localstack/localstack:s3-latest localstack start
```

-```yaml lineNumbers
+```yaml showLineNumbers
services:
  localstack:
    container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}"
diff --git a/src/content/docs/aws/services/servicediscovery.mdx b/src/content/docs/aws/services/servicediscovery.mdx
index d2cdcb58..ed700dbc 100644
--- a/src/content/docs/aws/services/servicediscovery.mdx
+++ b/src/content/docs/aws/services/servicediscovery.mdx
@@ -82,7 +82,7 @@ awslocal ecs create-cluster \
Next, you will register a task definition that's compatible with Fargate.
Create a file named `fargate-task.json` and add the following content:

-```json title="fargate-task.json" lineNumbers
+```json title="fargate-task.json" showLineNumbers
{
    "family": "tutorial-task-def",
    "networkMode": "awsvpc",
@@ -157,7 +157,7 @@ Make a note of the `GroupId` and `SubnetId` values.
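If you prefer to script the follow-up steps, one way to capture those values is sketched below; it simply takes the first subnet and security group returned, which may need adjusting for your VPC layout:

```bash
# Store the first available subnet and security group IDs in shell variables.
SUBNET_ID=$(awslocal ec2 describe-subnets --query 'Subnets[0].SubnetId' --output text)
GROUP_ID=$(awslocal ec2 describe-security-groups --query 'SecurityGroups[0].GroupId' --output text)
echo "Subnet: ${SUBNET_ID}, Security group: ${GROUP_ID}"
```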
Create a new file named `ecs-service-discovery.json` and add the following content to it: -```json title="ecs-service-discovery.json" lineNumbers +```json title="ecs-service-discovery.json" showLineNumbers { "cluster": "tutorial", "serviceName": "ecs-service-discovery", diff --git a/src/content/docs/aws/services/sqs.mdx b/src/content/docs/aws/services/sqs.mdx index 75ee5348..85e6cda0 100644 --- a/src/content/docs/aws/services/sqs.mdx +++ b/src/content/docs/aws/services/sqs.mdx @@ -408,7 +408,7 @@ curl "http://localhost.localstack.cloud:4566/_aws/sqs/messages?QueueUrl=http://s ``` -```python lineNumbers +```python showLineNumbers import requests response = requests.get( @@ -577,7 +577,7 @@ aws --endpoint-url=http://localhost.localstack.cloud:4566/_aws/sqs/messages sqs ``` -```python lineNumbers +```python showLineNumbers import boto3 sqs = boto3.client("sqs", endpoint_url="http://localhost.localstack.cloud:4566/_aws/sqs/messages") response = sqs.receive_message(QueueUrl="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/my-queue") @@ -631,7 +631,7 @@ curl -H "Accept: application/json" \ ``` -```python lineNumbers +```python showLineNumbers import requests response = requests.get( "http://localhost.localstack.cloud:4566/_aws/sqs/messages", diff --git a/src/content/docs/aws/services/stepfunctions.mdx b/src/content/docs/aws/services/stepfunctions.mdx index 39426eef..21fb7abf 100644 --- a/src/content/docs/aws/services/stepfunctions.mdx +++ b/src/content/docs/aws/services/stepfunctions.mdx @@ -28,7 +28,7 @@ You can create a state machine using the [`CreateStateMachine`](https://docs.aws The API requires the name of the state machine, the state machine definition, and the role ARN that the state machine will assume to call AWS services. Run the following command to create a state machine: -```bash lineNumbers +```bash showLineNumbers awslocal stepfunctions create-state-machine \ --name "CreateAndListBuckets" \ --definition '{ @@ -167,7 +167,7 @@ The first step is to select the state machine where mocked responses should be a In this example, we'll use a state machine named `LambdaSQSIntegration`, defined as follows: -```json title="LambdaSQSIntegration.json" lineNumbers +```json title="LambdaSQSIntegration.json" showLineNumbers { "Comment": "This state machine is called: LambdaSQSIntegration", "QueryLanguage": "JSONata", @@ -223,7 +223,7 @@ This section specifies the Step Functions state machines to mock, along with the Each test case maps state names to `ResponseID`s defined in the `MockedResponses` section. -```json lineNumbers +```json showLineNumbers "StateMachines": { "": { "TestCases": { @@ -247,7 +247,7 @@ At runtime, if a test case is selected, the state uses the mocked response (if d Below is a complete example of the `StateMachines` section: -```json lineNumbers +```json showLineNumbers "LambdaSQSIntegration": { "TestCases": { "LambdaRetryCase": { @@ -264,7 +264,7 @@ This section defines mocked responses for Task states. Each `ResponseID` includes one or more step keys and defines either a `Return` value or a `Throw` error. -```json lineNumbers +```json showLineNumbers "MockedResponses": { "": { "": { "Return": ... }, @@ -287,7 +287,7 @@ Each entry must have **either** `Return` or `Throw`, but cannot have both. 
Here is a complete example of the `MockedResponses` section: -```json lineNumbers +```json showLineNumbers "MockedLambdaStateRetry": { "0": { "Throw": { @@ -314,7 +314,7 @@ Here is a complete example of the `MockedResponses` section: The `MockConfigFile.json` below is used to test the `LambdaSQSIntegration` state machine defined earlier. -```json lineNumbers +```json showLineNumbers { "StateMachines":{ "LambdaSQSIntegration":{ @@ -394,7 +394,7 @@ localstack start --volume /path/to/MockConfigFile.json:/tmp/MockConfigFile.json ``` -```yaml lineNumbers +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -452,7 +452,7 @@ awslocal stepfunctions describe-execution \ --execution-arn "arn:aws:states:us-east-1:000000000000:execution:LambdaSQSIntegration:MockExecutionBaseCase" ``` -```json lineNumbers +```json showLineNumbers { "executionArn": "arn:aws:states:us-east-1:000000000000:execution:LambdaSQSIntegration:MockExecutionBaseCase", "stateMachineArn": "arn:aws:states:us-east-1:000000000000:stateMachine:LambdaSQSIntegration", diff --git a/src/content/docs/aws/services/swf.mdx b/src/content/docs/aws/services/swf.mdx index e0802e5a..c086f339 100644 --- a/src/content/docs/aws/services/swf.mdx +++ b/src/content/docs/aws/services/swf.mdx @@ -82,7 +82,7 @@ awslocal swf list-domains --registration-status DEPRECATED You can register a workflow using the [`RegisterWorkflowType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterWorkflowType.html) API. Execute the following command to register a workflow named `test-workflow`: -```bash lineNumbers +```bash showLineNumbers awslocal swf register-workflow-type \ --domain test-domain \ --name test-workflow \ @@ -130,7 +130,7 @@ The following output would be retrieved: You can register an activity using the [`RegisterActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterActivityType.html) API. Execute the following command to register an activity named `test-activity`: -```bash lineNumbers +```bash showLineNumbers awslocal swf register-activity-type \ --domain test-domain \ --name test-activity \ @@ -145,7 +145,7 @@ awslocal swf register-activity-type \ You can use the [`DescribeActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeActivityType.html) API to verify that the activity was registered successfully. Run the following command to describe the `test-activity` activity: -```bash lineNumbers +```bash showLineNumbers awslocal swf describe-activity-type \ --domain test-domain \ --activity-type name=test-activity,version=1.0 @@ -180,7 +180,7 @@ The following output would be retrieved: You can start a workflow execution using the [`StartWorkflowExecution`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_StartWorkflowExecution.html) API. 
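Before starting the execution, you can optionally confirm that the workflow type is registered, for example with a check like this:

```bash
# List registered workflow types in the test domain.
awslocal swf list-workflow-types \
  --domain test-domain \
  --registration-status REGISTERED
```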
Execute the following command to start a workflow execution for the `test-workflow` workflow:

-```bash lineNumbers
+```bash showLineNumbers
awslocal swf start-workflow-execution \
    --domain test-domain \
    --workflow-type name=test-workflow,version=1.0 \
diff --git a/src/content/docs/aws/services/transfer.mdx b/src/content/docs/aws/services/transfer.mdx
index d8d21d32..150e3800 100644
--- a/src/content/docs/aws/services/transfer.mdx
+++ b/src/content/docs/aws/services/transfer.mdx
@@ -17,7 +17,7 @@ Whether you're looking to facilitate file transfers or enhance your data access

This Python code demonstrates a basic workflow for transferring a file between a local machine and AWS S3 using the AWS Transfer Family service and FTP (File Transfer Protocol).

-```python lineNumbers
+```python showLineNumbers
import io
import time
import uuid
diff --git a/src/content/docs/aws/services/verifiedpermissions.mdx b/src/content/docs/aws/services/verifiedpermissions.mdx
index 75f64e12..98a689b8 100644
--- a/src/content/docs/aws/services/verifiedpermissions.mdx
+++ b/src/content/docs/aws/services/verifiedpermissions.mdx
@@ -55,7 +55,7 @@ To create a Verified Permissions Policy, use the [`CreatePolicy`](https://docs.a

Create a JSON file named `static_policy.json` with the following content:

-```json lineNumbers
+```json showLineNumbers
{
    "static": {
        "description": "Grant the User alice access to view the trip Album",
diff --git a/src/content/docs/aws/services/waf.mdx b/src/content/docs/aws/services/waf.mdx
index 871a3b83..1d935684 100644
--- a/src/content/docs/aws/services/waf.mdx
+++ b/src/content/docs/aws/services/waf.mdx
@@ -26,7 +26,7 @@ We will walk you through creating, listing, tagging, and viewing tags for Web Ac
Start by creating a Web Access Control List (WebACL) using the [`CreateWebACL`](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateWebACL.html) API.
Run the following command to create a WebACL named `TestWebAcl`:

-```bash lineNumbers
+```bash showLineNumbers
awslocal wafv2 create-web-acl \
    --name TestWebAcl \
    --scope REGIONAL \
diff --git a/src/content/docs/aws/services/xray.mdx b/src/content/docs/aws/services/xray.mdx
index 2c6e7134..fbe1436f 100644
--- a/src/content/docs/aws/services/xray.mdx
+++ b/src/content/docs/aws/services/xray.mdx
@@ -42,7 +42,7 @@ You can generate a unique trace ID and construct a JSON document with trace in
It then sends this trace segment to the AWS X-Ray API using the [PutTraceSegments](https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html) API.
Run the following commands in your terminal:

-```bash lineNumbers
+```bash showLineNumbers
START_TIME=$(date +%s)
HEX_TIME=$(printf '%x\n' $START_TIME)
GUID=$(dd if=/dev/random bs=12 count=1 2>/dev/null | od -An -tx1 | tr -d ' \t\n')
@@ -68,7 +68,7 @@ Sending trace segment to X-Ray API: {"trace_id": "1-6501ee11-056ec85fafff21f648e

You can now retrieve the trace summaries from the last 10 minutes using the [GetTraceSummaries](https://docs.aws.amazon.com/xray/latest/api/API_GetTraceSummaries.html) API.
Run the following commands in your terminal: -```bash lineNumbers +```bash showLineNumbers EPOCH=$(date +%s) awslocal xray get-trace-summaries --start-time $(($EPOCH-600)) --end-time $(($EPOCH)) { diff --git a/src/content/docs/aws/tooling/aws-replicator.mdx b/src/content/docs/aws/tooling/aws-replicator.mdx index a2443bf4..eb4d0d50 100644 --- a/src/content/docs/aws/tooling/aws-replicator.mdx +++ b/src/content/docs/aws/tooling/aws-replicator.mdx @@ -77,7 +77,7 @@ Follow the [installation instructions](/aws/getting-started/installation/#instal To start a replication job, get the ARN of the resource to replicate. Then, trigger the job using the command: -```bash lineNumbers +```bash showLineNumbers export LOCALSTACK_AUTH_TOKEN= export AWS_DEFAULT_REGION=... # if required @@ -101,7 +101,7 @@ localstack replicator start --resource-arn This triggers the replication job. The output will look similar to: -```json lineNumbers +```json showLineNumbers { "job_id": "50005865-1589-4f6d-a720-c86f5a5dd021", "state": "TESTING_CONNECTION", @@ -125,7 +125,7 @@ The output will look similar to: To trigger replication via the HTTP API, send a `POST` request to `http://localhost.localstack.cloud:4566/_localstack/replicator/jobs` with the following payload: -```json lineNumbers +```json showLineNumbers { "replication_type": "SINGLE_RESOURCE", "replication_job_config": { @@ -160,7 +160,7 @@ $ localstack replicator status This command returns the job status in JSON format, for example: -```json lineNumbers +```json showLineNumbers { "job_id": "50005865-1589-4f6d-a720-c86f5a5dd021", "state": "SUCCEEDED", @@ -210,7 +210,7 @@ AWS_PROFILE=ls-sandbox aws ssm put-parameter\ AWS_PROFILE=ls-sandbox aws ssm get-parameters --names myparam ``` -```json lineNumbers +```json showLineNumbers { "Parameters": [ { @@ -235,7 +235,7 @@ Next, we can check that the parameter is not present in LocalStack using `awsloc awslocal ssm get-parameters --name myparam ``` -```json lineNumbers +```json showLineNumbers { "Parameters": [], "InvalidParameters": [ @@ -258,7 +258,7 @@ LOCALSTACK_AUTH_TOKEN= \ Configured credentials from the AWS CLI -```json lineNumbers +```json showLineNumbers { "job_id": "9acdc850-f71b-4474-b138-1668eb8b8396", "state": "TESTING_CONNECTION", @@ -278,7 +278,7 @@ LOCALSTACK_AUTH_TOKEN= \ localstack replicator status 9acdc850-f71b-4474-b138-1668eb8b8396 ``` -```json lineNumbers +```json showLineNumbers { "job_id": "9acdc850-f71b-4474-b138-1668eb8b8396", "state": "SUCCEEDED", @@ -297,7 +297,7 @@ The SSM parameter is now accessible. awslocal ssm get-parameters --name myparam --region eu-central-1 ``` -```json lineNumbers +```json showLineNumbers { "Parameters": [ { diff --git a/src/content/docs/aws/tooling/extensions/developing-extensions.mdx b/src/content/docs/aws/tooling/extensions/developing-extensions.mdx index e6fc71dd..dfa8b6a8 100644 --- a/src/content/docs/aws/tooling/extensions/developing-extensions.mdx +++ b/src/content/docs/aws/tooling/extensions/developing-extensions.mdx @@ -18,7 +18,7 @@ LocalStack exposes a Python API for building extensions that can be found in the The basic interface to implement is as follows: -```python lineNumbers +```python showLineNumbers class Extension(BaseExtension): """ An extension that is loaded into LocalStack dynamically. The method @@ -89,7 +89,7 @@ class Extension(BaseExtension): A minimal example would look like this: -```python lineNumbers +```python showLineNumbers import logging from localstack.extensions.api import Extension @@ -123,7 +123,7 @@ example below. 
A minimal `setup.cfg` for the extension above could look like this: -```toml lineNumbers +```toml showLineNumbers [metadata] name = localstack-extension-ready-announcer description = LocalStack extension that logs when LocalStack is ready to receive requests @@ -174,7 +174,7 @@ Commands: First, create a new extension from a template. To use `localstack extensions dev new`, you will also need to install [cookiecutter](https://github.com/cookiecutter/cookiecutter) via `pip install cookiecutter`. -```bash lineNumbers +```bash showLineNumbers $ localstack extensions dev new project_name [My LocalStack Extension]: project_short_description [All the boilerplate you need to create a LocalStack extension.]: diff --git a/src/content/docs/aws/tooling/extensions/managing-extensions.mdx b/src/content/docs/aws/tooling/extensions/managing-extensions.mdx index afd5accc..18b9b817 100644 --- a/src/content/docs/aws/tooling/extensions/managing-extensions.mdx +++ b/src/content/docs/aws/tooling/extensions/managing-extensions.mdx @@ -109,7 +109,7 @@ If you want to use the `file://` directive, the distribution file needs to be mo In a docker-compose file, this would look something like: -```yaml lineNumbers +```yaml showLineNumbers services: localstack: container_name: "localstack-main" @@ -153,7 +153,7 @@ An example project could look something like this: * `docker-compose.yaml` - ```yaml lineNumbers + ```yaml showLineNumbers services: localstack: ... diff --git a/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx b/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx index 63d652f5..8e1629c7 100644 --- a/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx +++ b/src/content/docs/aws/tooling/lambda-tools/hot-reloading.mdx @@ -227,7 +227,7 @@ First, create a watchman wrapper by using After that, you can use the following `Makefile` snippet, or implement another shell script to prepare the codebase for hot reloading: -```make lineNumbers +```make showLineNumbers BUILD_FOLDER ?= build PROJECT_MODULE_NAME = my_project_module @@ -277,7 +277,7 @@ npm install -D @types/aws-lambda esbuild Create a new file named `index.ts`. Add the following code to the new file: -```ts lineNumbers +```ts showLineNumbers import { Context, APIGatewayProxyResult, APIGatewayEvent } from 'aws-lambda'; export const handler = async (event: APIGatewayEvent, context: Context): Promise => { @@ -453,7 +453,7 @@ You can now see that the changes are applied without redeploying the Lambda func -```yaml lineNumbers +```yaml showLineNumbers custom: localstack: ... @@ -477,7 +477,7 @@ custom: ``` -```kotlin lineNumbers +```kotlin showLineNumbers package org.localstack.cdkstack import java.util.UUID @@ -521,7 +521,7 @@ class ApplicationStack(parent: Construct, name: String) : Stack(parent, name) { ``` -```hcl lineNumbers +```hcl showLineNumbers variable "STAGE" { type = string default = "local" @@ -650,7 +650,7 @@ For bash, please use single quotes `'` instead of double quotes `"` to make sure In order to make use of the environment variable placeholders, you can inject them into the LocalStack container, for example using the following `docker-compose.yml` file. 
-```yaml lineNumbers +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" diff --git a/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx b/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx index eff9c268..6a400fdd 100644 --- a/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx +++ b/src/content/docs/aws/tooling/lambda-tools/remote-debugging.mdx @@ -56,7 +56,7 @@ inside the Lambda function code. In general, all you need is the following code fragment placed inside your handler code: -```python lineNumbers +```python showLineNumbers import debugpy debugpy.listen(("0.0.0.0", 19891)) debugpy.wait_for_client() # blocks execution until client is attached @@ -65,7 +65,7 @@ debugpy.wait_for_client() # blocks execution until client is attached For extra convenience, you can use the `wait_for_debug_client` function from our example. It implements the above-mentioned start of the debug server and also adds an automatic cancellation of the wait task if the debug client (i.e. VSCode) doesn't connect. -```python lineNumbers +```python showLineNumbers def wait_for_debug_client(timeout=15): """Utility function to enable debugging with Visual Studio Code""" import time, threading @@ -89,7 +89,7 @@ def wait_for_debug_client(timeout=15): For attaching the debug server from Visual Studio Code, you need to add a run configuration. -```json lineNumbers +```json showLineNumbers { "version": "0.2.0", "configurations": [ @@ -287,7 +287,7 @@ Make sure you installed the following extensions: Add a new task by creating/modifying the `.vscode/tasks.json` file: -```json lineNumbers +```json showLineNumbers { "version": "2.0.0", "tasks": [ @@ -303,7 +303,7 @@ Add a new task by creating/modifying the `.vscode/tasks.json` file: Create a new `launch.json` file or edit an existing one from the `Run and Debug` tab, then add the following configuration: -```json lineNumbers +```json showLineNumbers { "version": "0.2.0", "configurations": [ @@ -330,7 +330,7 @@ lambda function. Set the `LAMBDA_DOCKER_FLAGS` to enable the debugger using `NODE_OPTIONS`: -```yaml lineNumbers +```yaml showLineNumbers #docker-compose.yml services: @@ -345,7 +345,7 @@ services: Add a new task by creating/modifying the `.vscode/tasks.json` file: -```json lineNumbers +```json showLineNumbers { "version": "2.0.0", "tasks": [ @@ -361,7 +361,7 @@ Add a new task by creating/modifying the `.vscode/tasks.json` file: Create a new `launch.json` file or edit an existing one from the `Run and Debug` tab, then add the following configuration: -```json lineNumbers +```json showLineNumbers { "version": "0.2.0", "configurations": [ @@ -381,7 +381,7 @@ then add the following configuration: A simple example of a Node.js lambda, `myindex.js` could look like this: -```js lineNumbers +```js showLineNumbers exports.handler = async (event) => { console.log(event); const response = { @@ -469,7 +469,7 @@ localstack start --volume /path/to/debug-config.yaml:/tmp/lambda_debug_mode_conf ``` -```yaml lineNumbers +```yaml showLineNumbers services: localstack: container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}" @@ -500,7 +500,7 @@ for each specific Lambda function ARN. #### Example: Basic Debugging Configuration This example configures Lambda Debug Mode to use port 19891 for the remote debugger. 
-```yaml lineNumbers +```yaml showLineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one: debug-port: 19891 @@ -510,7 +510,7 @@ functions: In this example, the automatic timeout handling feature is disabled for the specified Lambda function, enforcing the predefined timeouts instead. -```yaml lineNumbers +```yaml showLineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one: debug-port: 19891 @@ -522,7 +522,7 @@ functions: Specifying an unqualified Lambda ARN in the configuration is equivalent to specifying the ARN with the `$LATEST` version qualifier. -```yaml lineNumbers +```yaml showLineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one:$LATEST: debug-port: 19891 @@ -534,7 +534,7 @@ To debug multiple Lambda functions simultaneously, assign a different debug port Note that this configuration affects the container's internal debugger port as well, so the debugger port must be set accordingly. -```yaml lineNumbers +```yaml showLineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one: debug-port: 19891 @@ -546,7 +546,7 @@ functions: You can also debug different versions of the same Lambda function by assigning unique ports to each version. -```yaml lineNumbers +```yaml showLineNumbers functions: arn:aws:lambda:eu-central-1:000000000000:function:func-one:1: debug-port: 19891 diff --git a/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md b/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md index 2fa2b2c4..9f2b42d9 100644 --- a/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md +++ b/src/content/docs/aws/tooling/localstack-sdks/java-sdk.md @@ -25,7 +25,7 @@ This SDK is still in a preview phase, and will be subject to fast and breaking c The best way to use the LocalStack SDK for Java in your project is to consume it from Maven Central. You can use Maven to import the entire SDK into your project. -```xml lineNumbers +```xml showLineNumbers cloud.localstack localstack-sdk @@ -50,7 +50,7 @@ For instance, let us imagine the case in which you want to add a fault rule for You first need to use the `FaultRuleRequest` class to build a fault rule request. Then, you need to pass such a request object to the `addFaultRules` method of a created `ChaosClient`. -```java lineNumbers +```java showLineNumbers import cloud.localstack.sdk.chaos.ChaosClient; import cloud.localstack.sdk.chaos.requests.FaultRuleRequest; @@ -63,7 +63,7 @@ As a second example, let us look at the necessary code to save and load a Cloud Similarly to the `ChaosClient`, the `PodsClient` exposes two functions, `savePod` and `loadPod`, which expect a `SavePodRequest` and a `LoadPodRequest`, respectively. The resulting code is the following: -```java lineNumbers +```java showLineNumbers import cloud.localstack.sdk.pods.PodsClient; import cloud.localstack.sdk.pods.requests.LoadPodRequest; import cloud.localstack.sdk.pods.requests.SavePodRequest; diff --git a/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md b/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md index 98c4fe2c..c4f3e971 100644 --- a/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md +++ b/src/content/docs/aws/tooling/localstack-sdks/python-sdk.md @@ -43,7 +43,7 @@ Using the SDK in Python is straightforward: developers can import the relevant m The following code snippet shows how to set up an SQS client, create a queue, send messages, and retrieve them to test local SQS interactions using LocalStack. 
-```python lineNumbers +```python showLineNumbers import json import boto3 import localstack.sdk.aws @@ -88,7 +88,7 @@ for msg in messages: -```bash lineNumbers +```bash showLineNumbers Message Body: {"event": "event-0", "message": "message-0"} Message Body: {"event": "event-1", "message": "message-1"} Message Body: {"event": "event-2", "message": "message-2"} @@ -100,7 +100,7 @@ Message Body: {"event": "event-4", "message": "message-4"} The following code snippet verifies an email address, sends a raw email, retrieves the message ID, and discards all SES messages afterward. -```python lineNumbers +```python showLineNumbers import boto3 import localstack.sdk.aws @@ -148,7 +148,7 @@ Cloud Pods is a feature that enables storing and managing snapshots of the curre This code snippet shows listing available pods, saving a new pod, loading it, and then deleting it. You need to set your `LOCALSTACK_AUTH_TOKEN` in your terminal session before running the snippet. -```python lineNumbers +```python showLineNumbers from localstack.sdk.pods import PodsClient POD_NAME = "ls-cloud-pod" @@ -184,7 +184,7 @@ Pod 'ls-cloud-pod' deleted. The following example demonstrates how to reset the current cloud state using LocalStack’s `StateClient`. -```python lineNumbers +```python showLineNumbers import boto3 from localstack.sdk.state import StateClient @@ -228,7 +228,7 @@ Error after state reset: AWS.SimpleQueueService.NonExistentQueue LocalStack’s Chaos API enables fault injection to simulate issues in AWS services. This example shows how to add a fault rule for the S3 service, retrieve and display the rule, and finally delete it to return to normal operations. -```python lineNumbers +```python showLineNumbers import localstack.sdk.chaos from localstack.sdk.models import FaultRule diff --git a/src/content/docs/aws/tooling/testing-utils.md b/src/content/docs/aws/tooling/testing-utils.md index c41f3096..3db70716 100644 --- a/src/content/docs/aws/tooling/testing-utils.md +++ b/src/content/docs/aws/tooling/testing-utils.md @@ -23,7 +23,7 @@ pip install localstack-utils ### Usage -```python lineNumbers +```python showLineNumbers import time import boto3 import unittest diff --git a/src/content/docs/aws/tutorials/elb-load-balancing.mdx b/src/content/docs/aws/tutorials/elb-load-balancing.mdx index 5b180910..5968bb4c 100644 --- a/src/content/docs/aws/tutorials/elb-load-balancing.mdx +++ b/src/content/docs/aws/tutorials/elb-load-balancing.mdx @@ -93,7 +93,7 @@ This bucket is responsible for storing the deployment artifacts and ensuring tha We have a `serverless.yml` file in the directory to define our Serverless project's configuration, which includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. To set up the plugins we installed earlier, you need to add the following properties to your `serverless.yml` file: -```yaml lineNumbers +```yaml showLineNumbers service: serverless-elb frameworkVersion: '3' @@ -123,7 +123,7 @@ Configure a `deploy` script in your `package.json` file to simplify the deployme It lets you run the `serverless deploy` command directly over your local infrastructure. 
Update your `package.json` file to include the following: -```json lineNumbers +```json showLineNumbers { "name": "serverless-elb", "version": "1.0.0", @@ -156,7 +156,7 @@ This will execute the `serverless deploy --stage local` command, deploying your Now, let's create two Lambda functions named `hello1` and `hello2` that will run on the Node.js 12.x runtime. Open the `handler.js` file and replace the existing code with the following: -```js lineNumbers +```js showLineNumbers 'use strict'; module.exports.hello1 = async (event) => { @@ -194,7 +194,7 @@ It is typically used when you need to include binary content in the response bod Let us now configure the `serverless.yml` file to create an Application Load Balancer (ALB) and attach the Lambda functions to it. -```yaml lineNumbers +```yaml showLineNumbers service: serverless-elb provider: @@ -239,7 +239,7 @@ In this example, both functions are triggered by HTTP GET requests to the `/hell Lastly, let's create a VPC, a subnet, an Application Load Balancer, and an HTTP listener on the load balancer that redirects traffic to the target group. To do this, add the following resources to your `serverless.yml` file: -```yaml lineNumbers +```yaml showLineNumbers ... resources: Resources: @@ -321,7 +321,7 @@ This output confirms the successful deployment of your Serverless service to the It also displays information about the deployed Lambda functions (`hello1` and `hello2`). You can run the following command to verify that the functions and the load balancers have been deployed: -```bash lineNumbers +```bash showLineNumbers awslocal lambda list-functions { "Functions": [ diff --git a/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx index 3d06f47e..25800509 100644 --- a/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx +++ b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx @@ -47,7 +47,7 @@ Optionally, you can create a folder called `assets` to store images and other as Let's create a directory named `s3-static-website-localstack` where we'll store our static website files. If you don't have an `index.html` file, you can use the following code to create one: -```html lineNumbers +```html showLineNumbers @@ -65,7 +65,7 @@ S3 will serve this file when a user visits the root URL of your static website, In a similar fashion, you can configure a custom error document that contains a user-friendly error message. Let's create a file named `error.html` and add the following code: -```html lineNumbers +```html showLineNumbers @@ -96,7 +96,7 @@ awslocal s3api create-bucket --bucket testwebsite With the bucket created, we can now attach a policy to it to allow public access and its contents. Let's create a file named `bucket_policy.json` in the root directory and add the following code: -```json lineNumbers +```json showLineNumbers { "Version": "2012-10-17", "Statement": [ @@ -147,7 +147,7 @@ Before that, we would need to manually configure the local service endpoints and We will use the [AWS Provider for Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to interact with the many resources supported by AWS in LocalStack. Create a new file named `provider.tf` and specify mock credentials for the AWS provider: -```hcl lineNumbers +```hcl showLineNumbers provider "aws" { region = "us-east-1" access_key = "fake" @@ -161,7 +161,7 @@ Additionally, we have to point the individual services to LocalStack. 
We can do this by specifying the `endpoints` parameter for each service, that we intend to use. Our `provider.tf` file should look like this: -```hcl lineNumbers +```hcl showLineNumbers provider "aws" { access_key = "test" secret_key = "test" @@ -190,7 +190,7 @@ For most of the other services, it is fine to use `localhost:4566`. With the provider configured, we can now configure the variables for our S3 bucket. Create a new file named `variables.tf` and add the following code: -```hcl lineNumbers +```hcl showLineNumbers variable "bucket_name" { description = "Name of the s3 bucket. Must be unique." type = string @@ -207,7 +207,7 @@ We take a user input for the bucket name and tags. Next, we will define the output variables for our Terraform configuration. Create a new file named `outputs.tf` and add the following code: -```hcl lineNumbers +```hcl showLineNumbers output "arn" { description = "ARN of the bucket" value = aws_s3_bucket.s3_bucket.arn @@ -232,7 +232,7 @@ The output variables are the ARN, name, domain name, and website endpoint of the With all the configuration files in place, we can now create the S3 bucket. Create a new file named `main.tf` and create the S3 bucket using the following code: -```hcl lineNumbers +```hcl showLineNumbers resource "aws_s3_bucket" "s3_bucket" { bucket = var.bucket_name tags = var.tags @@ -242,7 +242,7 @@ resource "aws_s3_bucket" "s3_bucket" { To configure the static website hosting, we will use the `aws_s3_bucket_website_configuration` resource. Add the following code to the `main.tf` file: -```hcl lineNumbers +```hcl showLineNumbers resource "aws_s3_bucket_website_configuration" "s3_bucket" { bucket = aws_s3_bucket.s3_bucket.id @@ -260,7 +260,7 @@ resource "aws_s3_bucket_website_configuration" "s3_bucket" { To set the bucket policy, we will use the `aws_s3_bucket_policy` resource. Add the following code to the `main.tf` file: -```hcl lineNumbers +```hcl showLineNumbers resource "aws_s3_bucket_acl" "s3_bucket" { bucket = aws_s3_bucket.s3_bucket.id acl = "public-read" @@ -292,7 +292,7 @@ Pick up an appropriate policy based on your use case. Let's use the `aws_s3_object` resource to upload the files to the bucket. Add the following code to the `main.tf` file: -```hcl lineNumbers +```hcl showLineNumbers resource "aws_s3_object" "object_www" { depends_on = [aws_s3_bucket.s3_bucket] for_each = fileset("${path.root}", "*.html") @@ -309,7 +309,7 @@ The above code uploads all our html files to the bucket. We are also setting the ACL of the files to `public-read`. 
Optionally, if you have static assets like images, CSS, and JavaScript files, you can upload them to the bucket using the same `aws_s3_bucket_object` resource by adding the following code to the `main.tf` file: -```hcl lineNumbers +```hcl showLineNumbers resource "aws_s3_object" "object_assets" { depends_on = [aws_s3_bucket.s3_bucket] for_each = fileset(path.module, "assets/*") From b42702e722f014951aa1916c2be96a05eb79697d Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Tue, 24 Jun 2025 14:32:18 -0700 Subject: [PATCH 7/8] fixing my dumbass mistake in snow docs too, wrote lineNumbers instead of showLineNumbers --- src/content/docs/snowflake/capabilities/init-hooks.mdx | 2 +- src/content/docs/snowflake/capabilities/state-management.mdx | 4 ++-- src/content/docs/snowflake/features/accounts.md | 4 ++-- src/content/docs/snowflake/features/authentication.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/content/docs/snowflake/capabilities/init-hooks.mdx b/src/content/docs/snowflake/capabilities/init-hooks.mdx index 1e0c5360..af0d941b 100644 --- a/src/content/docs/snowflake/capabilities/init-hooks.mdx +++ b/src/content/docs/snowflake/capabilities/init-hooks.mdx @@ -30,7 +30,7 @@ Mount the script into `/etc/localstack/init/ready.d/` using Docker Compose or th -```yaml lineNumbers +```yaml showLineNumbers version: "3.8" services: diff --git a/src/content/docs/snowflake/capabilities/state-management.mdx b/src/content/docs/snowflake/capabilities/state-management.mdx index e7fbff1c..f3a22093 100644 --- a/src/content/docs/snowflake/capabilities/state-management.mdx +++ b/src/content/docs/snowflake/capabilities/state-management.mdx @@ -32,7 +32,7 @@ localstack start ``` -```yaml lineNumbers +```yaml showLineNumbers ... image: localstack/snowflake environment: @@ -43,7 +43,7 @@ localstack start ``` -```bash lineNumbers +```bash showLineNumbers docker run \ -e LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} \ -e PERSISTENCE=1 \ diff --git a/src/content/docs/snowflake/features/accounts.md b/src/content/docs/snowflake/features/accounts.md index f721c504..15bc75eb 100644 --- a/src/content/docs/snowflake/features/accounts.md +++ b/src/content/docs/snowflake/features/accounts.md @@ -25,7 +25,7 @@ If the Snowflake driver provides a connection object, you can pass the `account` Example using the Snowflake Connector for Python: -```python lineNumbers +```python showLineNumbers sf_conn_obj = sf.connect( account="your_account", # other parameters @@ -34,7 +34,7 @@ sf_conn_obj = sf.connect( Example using the NodeJS Driver for Snowflake: -```javascript lineNumbers +```javascript showLineNumbers var connection = snowflake.createConnection({ account: "your_account", // other parameters diff --git a/src/content/docs/snowflake/features/authentication.md b/src/content/docs/snowflake/features/authentication.md index 3cb9c7a8..a5b53c59 100644 --- a/src/content/docs/snowflake/features/authentication.md +++ b/src/content/docs/snowflake/features/authentication.md @@ -18,7 +18,7 @@ To authenticate using a username and password, you can set the `user` and `passw Here's an example of how to connect to the Snowflake emulator using a username and password in a Python script: -```python lineNumbers +```python showLineNumbers import snowflake.connector as sf sf_conn_obj = sf.connect( @@ -55,7 +55,7 @@ ALTER USER your_user_name SET RSA_PUBLIC_KEY=''; Then authenticate with the private key using the Snowflake client: -```python lineNumbers +```python showLineNumbers import snowflake.connector conn = 
snowflake.connector.connect( From 44cff85c3dce0483cfac7266f3193706b28b018b Mon Sep 17 00:00:00 2001 From: Quetzalli Writes Date: Tue, 24 Jun 2025 14:44:14 -0700 Subject: [PATCH 8/8] finished adding showLineNumbers to snow docs --- .../features/cross-database-resource-sharing.md | 12 ++++++------ .../docs/snowflake/features/dynamic-tables.md | 2 +- .../docs/snowflake/features/iceberg-tables.md | 2 +- .../docs/snowflake/features/materialized-views.md | 2 +- .../docs/snowflake/features/polaris-catalog.md | 14 +++++++------- .../docs/snowflake/features/row-access-policies.md | 4 ++-- src/content/docs/snowflake/features/snowpipe.md | 6 +++--- src/content/docs/snowflake/features/stages.mdx | 6 +++--- .../snowflake/features/storage-integrations.md | 4 ++-- src/content/docs/snowflake/features/streamlit.md | 2 +- src/content/docs/snowflake/features/streams.md | 4 ++-- src/content/docs/snowflake/features/tasks.md | 2 +- .../snowflake/features/transaction-management.md | 2 +- .../snowflake/features/user-defined-functions.md | 8 ++++---- .../docs/snowflake/getting-started/index.md | 4 ++-- .../docs/snowflake/getting-started/quickstart.md | 4 ++-- src/content/docs/snowflake/integrations/airflow.md | 8 ++++---- .../integrations/continuous-integration.mdx | 6 +++--- src/content/docs/snowflake/integrations/dbt.md | 8 ++++---- src/content/docs/snowflake/integrations/pulumi.md | 2 +- .../docs/snowflake/integrations/snow-sql.md | 2 +- .../snowflake/integrations/snowflake-drivers.md | 10 +++++----- .../docs/snowflake/integrations/snowpark.md | 8 ++++---- .../docs/snowflake/integrations/terraform.md | 4 ++-- .../tutorials/aws-lambda-localstack-snowpark.md | 14 +++++++------- .../credit-scoring-with-localstack-snowpark.md | 12 ++++++------ 26 files changed, 76 insertions(+), 76 deletions(-) diff --git a/src/content/docs/snowflake/features/cross-database-resource-sharing.md b/src/content/docs/snowflake/features/cross-database-resource-sharing.md index c9bed215..4b6bb1a7 100644 --- a/src/content/docs/snowflake/features/cross-database-resource-sharing.md +++ b/src/content/docs/snowflake/features/cross-database-resource-sharing.md @@ -19,7 +19,7 @@ In this guide, we'll walk through a series of Snowflake SQL statements to create Create three databases to represent the three different organizations that will share resources. In this example, we'll create databases for `db_name1`, `db_name2`, and `db_name3`. -```sql +```sql showLineNumbers CREATE DATABASE db_name1_actual; CREATE DATABASE db_name2_actual; CREATE DATABASE db_name3_actual; @@ -29,7 +29,7 @@ CREATE DATABASE db_name3_actual; Create a schema in each database to represent the shared resources. In this example, you can create a schema called `sch` in each database. -```sql +```sql showLineNumbers CREATE SCHEMA db_name1_actual.sch; CREATE SCHEMA db_name2_actual.sch; CREATE SCHEMA db_name3_actual.sch; @@ -39,7 +39,7 @@ CREATE SCHEMA db_name3_actual.sch; Create a table in each schema to represent the shared resources. In this example, you can create a table called `table1` in `db_name1_actual.sch`, `table2` in `db_name2_actual.sch`, and `table3` in `db_name3_actual.sch`. -```sql +```sql showLineNumbers CREATE TABLE db_name1_actual.sch.table1 (id INT); CREATE TABLE db_name2_actual.sch.table2 (id INT); CREATE TABLE db_name3_actual.sch.table3 (id INT); @@ -49,7 +49,7 @@ CREATE TABLE db_name3_actual.sch.table3 (id INT); You can now insert data into the tables to represent the shared resources. In this example, we'll insert a single row into each table. 
-```sql
+```sql showLineNumbers
 INSERT INTO db_name1_actual.sch.table1 (id) VALUES (1);
 INSERT INTO db_name2_actual.sch.table2 (id) VALUES (2);
 INSERT INTO db_name3_actual.sch.table3 (id) VALUES (3);
@@ -67,7 +67,7 @@ CREATE VIEW db_name1_actual.sch.view1 AS SELECT * FROM db_name1_actual.sch.table
 
 You can create a secure view `view3` in `db_name3_actual.sch` by joining data from different tables.
 
-```sql
+```sql showLineNumbers
 CREATE SECURE VIEW db_name3_actual.sch.view3 AS
 SELECT view1.id AS View1Id, table2.id AS table2id, table3.id AS table3id
 FROM db_name1_actual.sch.view1 view1, db_name2_actual.sch.table2 table2, db_name3_actual.sch.table3 table3;
@@ -77,7 +77,7 @@ FROM db_name1_actual.sch.view1 view1, db_name2_actual.sch.table2 table2, db_name
 
 You can create a share `s_actual` and grant usage permissions on the `db_name3_actual` database and its schema.
 
-```sql
+```sql showLineNumbers
 CREATE SHARE s_actual;
 GRANT USAGE ON DATABASE db_name3_actual TO SHARE s_actual;
 GRANT USAGE ON SCHEMA db_name3_actual.sch TO SHARE s_actual;
diff --git a/src/content/docs/snowflake/features/dynamic-tables.md b/src/content/docs/snowflake/features/dynamic-tables.md
index 74180951..176a0802 100644
--- a/src/content/docs/snowflake/features/dynamic-tables.md
+++ b/src/content/docs/snowflake/features/dynamic-tables.md
@@ -37,7 +37,7 @@ The output should be:
 
 You can create a dynamic table using the `CREATE DYNAMIC TABLE` statement.
 Run the following query to create a dynamic table:
 
-```sql
+```sql showLineNumbers
 CREATE OR REPLACE DYNAMIC TABLE t_12345 TARGET_LAG = '1 minute' WAREHOUSE = 'test'
     REFRESH_MODE = auto INITIALIZE = on_create
     AS SELECT id, name FROM example_table_name;
diff --git a/src/content/docs/snowflake/features/iceberg-tables.md b/src/content/docs/snowflake/features/iceberg-tables.md
index 68b97e67..c62d0658 100644
--- a/src/content/docs/snowflake/features/iceberg-tables.md
+++ b/src/content/docs/snowflake/features/iceberg-tables.md
@@ -27,7 +27,7 @@ $ awslocal s3 mb s3://test-bucket
 
 You can create an external volume using the `CREATE OR REPLACE EXTERNAL VOLUME` statement.
 The external volume is used to define the location of the files that Iceberg will use to store the table data.
 
-```sql
+```sql showLineNumbers
 CREATE OR REPLACE EXTERNAL VOLUME test_volume
 	STORAGE_LOCATIONS = (
 		(
diff --git a/src/content/docs/snowflake/features/materialized-views.md b/src/content/docs/snowflake/features/materialized-views.md
index 8e49ad2b..71278159 100644
--- a/src/content/docs/snowflake/features/materialized-views.md
+++ b/src/content/docs/snowflake/features/materialized-views.md
@@ -19,7 +19,7 @@ The following sections guide you through creating materialized views, inserting
 
 To create a materialized view, use the `CREATE MATERIALIZED VIEW` statement.
 The following example creates a view `order_view` that selects specific columns from the `orders` table.
-```sql +```sql showLineNumbers CREATE TABLE IF NOT EXISTS orders ( id INT, product TEXT, diff --git a/src/content/docs/snowflake/features/polaris-catalog.md b/src/content/docs/snowflake/features/polaris-catalog.md index 764c8834..85321755 100644 --- a/src/content/docs/snowflake/features/polaris-catalog.md +++ b/src/content/docs/snowflake/features/polaris-catalog.md @@ -25,7 +25,7 @@ This guide shows how to use the Polaris REST catalog to create Iceberg tables in The following command starts the Polaris catalog container using the `localstack/polaris` Docker image: -```bash +```bash showLineNumbers docker run -d --name polaris-test \ -p 8181:8181 -p 8182:8182 \ -e AWS_REGION=us-east-1 \ @@ -48,7 +48,7 @@ curl -X GET http://localhost:8182/health Set variables and retrieve an access token: -```bash +```bash showLineNumbers REALM="default-realm" CLIENT_ID="root" CLIENT_SECRET="s3cr3t" @@ -64,7 +64,7 @@ The `TOKEN` variable will contain the access token. Create a catalog: -```bash +```bash showLineNumbers curl -s -X POST http://localhost:8181/api/management/v1/catalogs \ -H "Authorization: Bearer $TOKEN" \ -H "Content-Type: application/json" \ @@ -89,7 +89,7 @@ curl -s -X POST http://localhost:8181/api/management/v1/catalogs \ Grant necessary permissions to the catalog: -```bash +```bash showLineNumbers curl -s -X PUT http://localhost:8181/api/management/v1/catalogs/polaris/catalog-roles/catalog_admin/grants \ -H "Authorization: Bearer $TOKEN" \ -H "Content-Type: application/json" \ @@ -108,7 +108,7 @@ awslocal s3 mb s3://$BUCKET_NAME In your SQL client, create an external volume using the `CREATE EXTERNAL VOLUME` statement: -```sql +```sql showLineNumbers CREATE EXTERNAL VOLUME polaris_volume STORAGE_LOCATIONS = ( ( @@ -126,7 +126,7 @@ ALLOW_WRITES = TRUE; Create a catalog integration using the `CREATE CATALOG INTEGRATION` statement: -```sql +```sql showLineNumbers CREATE CATALOG INTEGRATION polaris_catalog CATALOG_SOURCE = ICEBERG_REST TABLE_FORMAT = ICEBERG @@ -150,7 +150,7 @@ COMMENT = 'Polaris catalog integration'; Now create the table using the Polaris catalog and volume: -```sql +```sql showLineNumbers CREATE ICEBERG TABLE polaris_iceberg_table (c1 TEXT) CATALOG = 'polaris_catalog', EXTERNAL_VOLUME = 'polaris_volume', diff --git a/src/content/docs/snowflake/features/row-access-policies.md b/src/content/docs/snowflake/features/row-access-policies.md index 95a6f11b..7edf9caa 100644 --- a/src/content/docs/snowflake/features/row-access-policies.md +++ b/src/content/docs/snowflake/features/row-access-policies.md @@ -19,7 +19,7 @@ The following sections demonstrate how to create a row access policy, attach it Use the `CREATE ROW ACCESS POLICY` statement to define a filter condition. This policy will restrict row visibility based on column values. -```sql +```sql showLineNumbers CREATE OR REPLACE ROW ACCESS POLICY id_filter_policy AS (id INT) RETURNS BOOLEAN -> id IN (1, 2); @@ -29,7 +29,7 @@ AS (id INT) RETURNS BOOLEAN -> Create a table and bind the row access policy to one of its columns using the `WITH ROW ACCESS POLICY` clause. -```sql +```sql showLineNumbers CREATE TABLE accounts ( id INT ) diff --git a/src/content/docs/snowflake/features/snowpipe.md b/src/content/docs/snowflake/features/snowpipe.md index b5bc193e..dc1d52be 100644 --- a/src/content/docs/snowflake/features/snowpipe.md +++ b/src/content/docs/snowflake/features/snowpipe.md @@ -27,7 +27,7 @@ awslocal s3 mb s3://test-bucket You can create a stage using the `CREATE STAGE` command. 
The stage is used to define the location of the files that Snowpipe will load into the table. -```sql +```sql showLineNumbers CREATE STAGE test_stage URL='s3://test-bucket' CREDENTIALS = ( @@ -68,7 +68,7 @@ Retrieve the `notification_channel` value from the output of the `DESC PIPE` que You can use the [`PutBucketNotificationConfiguration`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) API to create a bucket notification configuration that sends notifications to Snowflake when new files are uploaded to the S3 bucket. -```bash +```bash showLineNumbers awslocal s3api put-bucket-notification-configuration \ --bucket test-bucket \ --notification-configuration file://notification.json @@ -76,7 +76,7 @@ awslocal s3api put-bucket-notification-configuration \ The `notification.json` file should contain the following configuration: -```json +```json showLineNumbers { "QueueConfigurations": [ { diff --git a/src/content/docs/snowflake/features/stages.mdx b/src/content/docs/snowflake/features/stages.mdx index 603dd7f6..b216dec3 100644 --- a/src/content/docs/snowflake/features/stages.mdx +++ b/src/content/docs/snowflake/features/stages.mdx @@ -30,7 +30,7 @@ CREATE OR REPLACE DATABASE snowflake_tutorials; Similarly, you can create a table using the `CREATE TABLE` command. In this example, you can create a table called `employees` in `snowflake_tutorials.public`: -```sql +```sql showLineNumbers CREATE OR REPLACE TABLE employees ( first_name STRING , last_name STRING , @@ -96,7 +96,7 @@ awslocal s3 cp employees0*.csv s3://testbucket In this example, you can create a stage called `my_s3_stage` to load data from an S3 bucket: -```sql +```sql showLineNumbers CREATE STAGE my_s3_stage STORAGE_INTEGRATION = s3_int URL = 's3://testbucket/' @@ -105,7 +105,7 @@ FILE_FORMAT = csv; You can further copy data from the S3 stage to the table using the `COPY INTO` command: -```sql +```sql showLineNumbers COPY INTO mytable FROM @my_s3_stage PATTERN='.*employees.*.csv'; diff --git a/src/content/docs/snowflake/features/storage-integrations.md b/src/content/docs/snowflake/features/storage-integrations.md index 90cf1130..f18abbe8 100644 --- a/src/content/docs/snowflake/features/storage-integrations.md +++ b/src/content/docs/snowflake/features/storage-integrations.md @@ -33,7 +33,7 @@ awslocal s3 cp file.csv s3://testbucket You can now create a Storage Integration named `s_example` which will connect Snowflake to your S3 bucket using the following statement: -```sql +```sql showLineNumbers CREATE STORAGE INTEGRATION s_example TYPE = EXTERNAL_STAGE ENABLED = TRUE @@ -82,7 +82,7 @@ The expected output is: You can now create an external stage using the following statement: -```sql +```sql showLineNumbers CREATE STAGE stage_example STORAGE_INTEGRATION = s_example URL = 's3://testbucket' diff --git a/src/content/docs/snowflake/features/streamlit.md b/src/content/docs/snowflake/features/streamlit.md index 5723f880..df3aa95a 100644 --- a/src/content/docs/snowflake/features/streamlit.md +++ b/src/content/docs/snowflake/features/streamlit.md @@ -74,7 +74,7 @@ To connect to the Snowflake emulator while developing locally, Streamlit provide To run the sample against Snowflake emulator, your local `~/.streamlit/secrets.toml` should look like this: -```toml +```toml showLineNumbers [snowpark] user = "test" password = "test" diff --git a/src/content/docs/snowflake/features/streams.md b/src/content/docs/snowflake/features/streams.md index f99d4dea..7b5a367f 100644 --- 
a/src/content/docs/snowflake/features/streams.md +++ b/src/content/docs/snowflake/features/streams.md @@ -19,7 +19,7 @@ The following sections guide you through a simple example of using Streams to tr The following SQL snippet demonstrates how to create a table named `members` to store the names and fees paid by members of a gym, and a table named `signup` to store the dates when gym members joined. -```sql +```sql showLineNumbers -- Create a table to store the names and fees paid by members of a gym CREATE TABLE IF NOT EXISTS members ( id NUMBER(8) NOT NULL, @@ -46,7 +46,7 @@ CREATE STREAM IF NOT EXISTS member_check ON TABLE members; To insert data into the `members` and `signup` tables, use the `INSERT INTO` statement. The following example demonstrates how to insert data into the `members` and `signup` tables. -```sql +```sql showLineNumbers INSERT INTO members (id,name,fee) VALUES (1,'Joe',0), diff --git a/src/content/docs/snowflake/features/tasks.md b/src/content/docs/snowflake/features/tasks.md index 28c4e4f5..bcb4d84f 100644 --- a/src/content/docs/snowflake/features/tasks.md +++ b/src/content/docs/snowflake/features/tasks.md @@ -17,7 +17,7 @@ This guide is designed for users new to Tasks and assumes basic knowledge of SQL To create a task, use the `CREATE TASK` statement. The following example demonstrates how to create a task named `test_task` that inserts a record into a table named `sample_table` every minute. -```sql +```sql showLineNumbers CREATE TASK test_task WAREHOUSE = 'test' SCHEDULE = '1 MINUTE' diff --git a/src/content/docs/snowflake/features/transaction-management.md b/src/content/docs/snowflake/features/transaction-management.md index 1da6017e..fcca3e89 100644 --- a/src/content/docs/snowflake/features/transaction-management.md +++ b/src/content/docs/snowflake/features/transaction-management.md @@ -91,7 +91,7 @@ SHOW TRANSACTIONS; To undo uncommitted changes, use the `ROLLBACK` statement. Subsequent rollbacks have no effect. -```sql +```sql showLineNumbers BEGIN; INSERT INTO orders VALUES (3), (4); diff --git a/src/content/docs/snowflake/features/user-defined-functions.md b/src/content/docs/snowflake/features/user-defined-functions.md index b4e5145e..663eb554 100644 --- a/src/content/docs/snowflake/features/user-defined-functions.md +++ b/src/content/docs/snowflake/features/user-defined-functions.md @@ -17,7 +17,7 @@ In the Snowflake emulator, you can create JavaScript UDFs to extend the function You can create a JavaScript UDF using the `CREATE FUNCTION` statement. The following example creates a JavaScript UDF that receives a number as input and adds 5 to it. -```sql +```sql showLineNumbers CREATE OR REPLACE FUNCTION add5(n double) RETURNS double LANGUAGE JAVASCRIPT @@ -47,7 +47,7 @@ Start your Snowflake emulator and connect to it using a SQL client to execute th You can define a Java UDF using the `CREATE FUNCTION` statement and provide the Java source inline with the `AS` clause. -```sql +```sql showLineNumbers CREATE OR REPLACE FUNCTION echo_inline(x VARCHAR) RETURNS VARCHAR LANGUAGE JAVA @@ -84,7 +84,7 @@ The result of the query is: You can also compile your Java code into a `.jar` file, upload it to a Snowflake stage, and reference it using the `IMPORTS` clause. 
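One way to get the JAR into a stage is to run `CREATE STAGE` and `PUT` through the Python connector. The sketch below is an illustration, assuming the emulator connection settings used in the authentication examples and a locally built `/tmp/testfunc.jar`:

```python showLineNumbers
import snowflake.connector as sf

# Connect to the Snowflake emulator (settings as in the authentication examples)
conn = sf.connect(
    user="test",
    password="test",
    account="test",
    host="snowflake.localhost.localstack.cloud",
)
cur = conn.cursor()

# Create the stage that the IMPORTS clause will reference
cur.execute("CREATE STAGE IF NOT EXISTS mystage")

# Upload the JAR; AUTO_COMPRESS=FALSE keeps the file name unchanged in the stage
cur.execute("PUT file:///tmp/testfunc.jar @mystage AUTO_COMPRESS=FALSE")
```

With the JAR staged, the `CREATE FUNCTION` statement can reference it through the `IMPORTS` clause: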
-```sql +```sql showLineNumbers -- Assume the JAR file has been uploaded to @mystage/testfunc.jar CREATE OR REPLACE FUNCTION echo_from_jar(x VARCHAR) RETURNS VARCHAR @@ -120,7 +120,7 @@ In the Snowflake emulator, you can create User-Defined Functions (UDFs) in Pytho You can create a Python UDF using the `CREATE FUNCTION` statement. The following example creates a Python UDF that takes a string as input and returns the string with a prefix. -```sql +```sql showLineNumbers CREATE OR REPLACE FUNCTION sample_func(sample_arg TEXT) RETURNS VARCHAR LANGUAGE PYTHON RUNTIME_VERSION='3.8' HANDLER='sample_func' diff --git a/src/content/docs/snowflake/getting-started/index.md b/src/content/docs/snowflake/getting-started/index.md index 7a971cf5..cfa63339 100644 --- a/src/content/docs/snowflake/getting-started/index.md +++ b/src/content/docs/snowflake/getting-started/index.md @@ -39,7 +39,7 @@ IMAGE_NAME=localstack/snowflake localstack start To start the Snowflake Docker container using the `docker` CLI, execute the following command: -```bash +```bash showLineNumbers docker run \ --rm -it \ -p 127.0.0.1:4566:4566 \ @@ -53,7 +53,7 @@ docker run \ Create a `docker-compose.yml` file with the specified content: -```yaml +```yaml showLineNumbers version: "3.8" services: diff --git a/src/content/docs/snowflake/getting-started/quickstart.md b/src/content/docs/snowflake/getting-started/quickstart.md index 3688c1c7..91aa44cb 100644 --- a/src/content/docs/snowflake/getting-started/quickstart.md +++ b/src/content/docs/snowflake/getting-started/quickstart.md @@ -42,7 +42,7 @@ The response should be: Create a new Python file named `main.py` and use the following code to connect to the Snowflake emulator: -```python +```python showLineNumbers import snowflake.connector as sf sf_conn_obj = sf.connect( @@ -60,7 +60,7 @@ Specify the `host` parameter as `snowflake.localhost.localstack.cloud` and the o Extend the Python program to insert rows from a list object into the emulated Snowflake table. Create a cursor object and execute the query: -```python +```python showLineNumbers print("1. Insert lot of rows from a list object to Snowflake table") print("2. 
Creating a cursor object") sf_cur_obj = sf_conn_obj.cursor() diff --git a/src/content/docs/snowflake/integrations/airflow.md b/src/content/docs/snowflake/integrations/airflow.md index f15cdd0f..6b0ecd97 100644 --- a/src/content/docs/snowflake/integrations/airflow.md +++ b/src/content/docs/snowflake/integrations/airflow.md @@ -15,7 +15,7 @@ On this page we outline how to set up the connection between local Airflow and t In order to create an Airflow environment in local MWAA, we can use the [`awslocal`](https://github.com/localstack/awscli-local) command: -```bash +```bash showLineNumbers awslocal s3 mb s3://my-mwaa-bucket awslocal mwaa create-environment --dag-s3-path /dags \ --execution-role-arn arn:aws:iam::000000000000:role/airflow-role \ @@ -29,7 +29,7 @@ awslocal mwaa create-environment --dag-s3-path /dags \ We can then create a local file `my_dag.py` with the Airflow DAG definition, for example: -```python +```python showLineNumbers import datetime import json @@ -85,7 +85,7 @@ The code listings below contain the patch for different Airflow versions - simpl **Airflow version 2.6.3 and above**: -```python +```python showLineNumbers # --- # patch for local Snowflake connection, for Airflow 2.6.3 and above from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook @@ -109,7 +109,7 @@ SnowflakeHook._get_conn_params = _get_conn_params **Airflow version 2.9.2 and above**: -```python +```python showLineNumbers # --- # patch for local Snowflake connection, for Airflow 2.9.2 / 2.10.1 from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook diff --git a/src/content/docs/snowflake/integrations/continuous-integration.mdx b/src/content/docs/snowflake/integrations/continuous-integration.mdx index 405c1e69..e99bfff8 100644 --- a/src/content/docs/snowflake/integrations/continuous-integration.mdx +++ b/src/content/docs/snowflake/integrations/continuous-integration.mdx @@ -28,7 +28,7 @@ The following examples demonstrate how to set up the emulator in GitHub Actions, -```yaml +```yaml showLineNumbers name: LocalStack Test on: [ push, pull_request ] @@ -52,7 +52,7 @@ jobs: ``` -```yaml +```yaml showLineNumbers version: 2.1 orbs: @@ -85,7 +85,7 @@ workflows: ``` -```yaml +```yaml showLineNumbers image: docker:20.10.16 stages: diff --git a/src/content/docs/snowflake/integrations/dbt.md b/src/content/docs/snowflake/integrations/dbt.md index 7b814b05..7ff79cc3 100644 --- a/src/content/docs/snowflake/integrations/dbt.md +++ b/src/content/docs/snowflake/integrations/dbt.md @@ -25,7 +25,7 @@ pip install dbt-snowflake Create or modify your `profiles.yml` file (typically located in `~/.dbt/profiles.yml`) to include the connection details for the Snowflake emulator: -```yaml +```yaml showLineNumbers localstack_snowflake: outputs: dev: @@ -61,7 +61,7 @@ Once configured, you can run standard dbt commands against the Snowflake emulato Here's a simple example of a dbt model which creates a table with a single row that you can use to test the integration: -```sql +```sql showLineNumbers -- models/example_model.sql {{ config(materialized='table') }} @@ -76,7 +76,7 @@ You can test your models using dbt's generic tests. 
Add the following to your `models/schema.yml`: -```yaml +```yaml showLineNumbers version: 2 models: @@ -119,7 +119,7 @@ my_dbt_project/ Example `dbt_project.yml`: -```yaml +```yaml showLineNumbers name: 'my_dbt_project' version: '1.0.0' config-version: 2 diff --git a/src/content/docs/snowflake/integrations/pulumi.md b/src/content/docs/snowflake/integrations/pulumi.md index 91b20968..487e07ad 100644 --- a/src/content/docs/snowflake/integrations/pulumi.md +++ b/src/content/docs/snowflake/integrations/pulumi.md @@ -35,7 +35,7 @@ npm install @pulumi/snowflake You can now use Pulumi to create Snowflake resources using the Snowflake provider. The following example shows how to create a Snowflake database using Pulumi: -```javascript +```javascript showLineNumbers import * as snowflake from "@pulumi/snowflake"; const simple = new snowflake.Database("simple", { diff --git a/src/content/docs/snowflake/integrations/snow-sql.md b/src/content/docs/snowflake/integrations/snow-sql.md index b6718c47..721f3db9 100644 --- a/src/content/docs/snowflake/integrations/snow-sql.md +++ b/src/content/docs/snowflake/integrations/snow-sql.md @@ -21,7 +21,7 @@ To install SnowSQL, follow the instructions in the [official SnowSQL documentati To start SnowSQL, execute the following command: -```bash +```bash showLineNumbers $ export SNOWSQL_PWD=test $ snowsql \ -a test \ diff --git a/src/content/docs/snowflake/integrations/snowflake-drivers.md b/src/content/docs/snowflake/integrations/snowflake-drivers.md index d2ff9604..88e369b8 100644 --- a/src/content/docs/snowflake/integrations/snowflake-drivers.md +++ b/src/content/docs/snowflake/integrations/snowflake-drivers.md @@ -19,7 +19,7 @@ pip install snowflake-connector-python The Snowflake emulator operates on `snowflake.localhost.localstack.cloud` - note that this is a DNS name that resolves to a local IP address (`127.0.0.1`) to make sure the connector interacts with the local APIs. Connect to the emulator using the following Python code: -```python +```python showLineNumbers import snowflake.connector as sf conn = sf.connect( @@ -33,7 +33,7 @@ conn = sf.connect( Subsequently, create a warehouse named `test_warehouse`, a database named `testdb`, and a schema named `testschema` using the Snowflake Connector for Python: -```python +```python showLineNumbers conn.cursor().execute("CREATE WAREHOUSE IF NOT EXISTS test_warehouse") conn.cursor().execute("CREATE DATABASE IF NOT EXISTS testdb") conn.cursor().execute("USE DATABASE testdb") @@ -52,7 +52,7 @@ npm install snowflake-sdk The Snowflake emulator runs on `snowflake.localhost.localstack.cloud`. 
Connect to the emulator using the following JavaScript code: -```javascript +```javascript showLineNumbers var snowflake = require('snowflake-sdk'); var connection = snowflake.createConnection({ username: 'test', @@ -74,7 +74,7 @@ connection.connect(function(err, conn) { Execute a query to create a database named `testdb` and verify the results using the following JavaScript code: -```javascript +```javascript showLineNumbers connection.execute({ sqlText: 'CREATE DATABASE testdb', complete: function(err, stmt, rows) { @@ -102,7 +102,7 @@ The connection string follows the format `username:password@host:port/database?a Here's an example of how to connect to the Snowflake emulator using Go: -```go +```go showLineNumbers package main import ( diff --git a/src/content/docs/snowflake/integrations/snowpark.md b/src/content/docs/snowflake/integrations/snowpark.md index d2748119..407c8636 100644 --- a/src/content/docs/snowflake/integrations/snowpark.md +++ b/src/content/docs/snowflake/integrations/snowpark.md @@ -21,7 +21,7 @@ In this getting started guide, we'll use the Snowpark Python library to establis The Snowflake emulator operates on `snowflake.localhost.localstack.cloud`. To create a Snowpark session in Python, use the following code: -```python +```python showLineNumbers from snowflake.snowpark import * from snowflake.snowpark.functions import * @@ -40,7 +40,7 @@ session = Session.builder.configs(connection_parameters).create() You can create a table named `sample_product_data` and fill the table with some data by executing SQL statements. Add the following Python code to create the table: -```python +```python showLineNumbers session.sql('CREATE OR REPLACE TABLE sample_product_data (id INT, parent_id INT, category_id INT, name VARCHAR, serial_number VARCHAR, key INT, "3rd" INT)').collect() [Row(status='Table SAMPLE_PRODUCT_DATA successfully created.')] session.sql(""" @@ -109,7 +109,7 @@ The following output should be displayed: You can perform local transformations on the DataFrame. For example, you can filter rows with the value of 'id' equal to 1: -```python +```python showLineNumbers df = session.table("sample_product_data").filter(col("id") == 1) df.show() ``` @@ -126,7 +126,7 @@ The following output should be displayed: Furthermore, you can also select specific columns: -```python +```python showLineNumbers df = session.table("sample_product_data").select(col("id"), col("name"), col("serial_number")) df.show() ``` diff --git a/src/content/docs/snowflake/integrations/terraform.md b/src/content/docs/snowflake/integrations/terraform.md index 253e9142..dc3fed81 100644 --- a/src/content/docs/snowflake/integrations/terraform.md +++ b/src/content/docs/snowflake/integrations/terraform.md @@ -17,7 +17,7 @@ In this guide, you will learn how to configure Terraform to interact with the Sn To use Terraform with the Snowflake emulator, you need to configure the Snowflake provider in your Terraform configuration file. The following example shows how to configure the Snowflake provider: -```hcl +```hcl showLineNumbers terraform { required_providers { snowflake = { @@ -48,7 +48,7 @@ export SNOWFLAKE_HOST=snowflake.localhost.localstack.cloud You can now use Terraform to create Snowflake resources using the Snowflake provider. 
The following example shows how to create a Snowflake database using Terraform: -```hcl +```hcl showLineNumbers resource "snowflake_database" "example" { name = "example" comment = "example database" diff --git a/src/content/docs/snowflake/tutorials/aws-lambda-localstack-snowpark.md b/src/content/docs/snowflake/tutorials/aws-lambda-localstack-snowpark.md index 91ddcfa1..5f76ac2b 100644 --- a/src/content/docs/snowflake/tutorials/aws-lambda-localstack-snowpark.md +++ b/src/content/docs/snowflake/tutorials/aws-lambda-localstack-snowpark.md @@ -28,14 +28,14 @@ The code in this tutorial is available on [GitHub](https://github.com/localstack Create a new directory for your lambda function and navigate to it: -```bash +```bash showLineNumbers mkdir -p lambda-snowpark cd lambda-snowpark ``` Create a new file named `handler.py` and add the following code: -```python +```python showLineNumbers import snowflake.connector as sf def lambda_handler(event, context): @@ -112,7 +112,7 @@ You can now install the dependencies for your Lambda function. These include: Run the following command: -```bash +```bash showLineNumbers pip3 install \ --platform manylinux2010_x86_64 \ --implementation cp \ @@ -125,7 +125,7 @@ pip3 install \ Package the Lambda function and its dependencies into a ZIP file. Run the following command: -```bash +```bash showLineNumbers mkdir -p build cp -r libs/* build/ (cd build && zip -q -r function-py.zip .) @@ -137,7 +137,7 @@ You have now created a ZIP file named `function-py.zip` that contains the Lambda Start your LocalStack container in your preferred terminal/shell. -```bash +```bash showLineNumbers export LOCALSTACK_AUTH_TOKEN= DEBUG=1 \ LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT=180 \ @@ -151,7 +151,7 @@ DEBUG=1 \ You can now deploy the Lambda function to LocalStack using the `awslocal` CLI. Run the following command: -```bash +```bash showLineNumbers awslocal lambda create-function \ --function-name localstack-snowflake-lambda-example \ --runtime python3.10 \ @@ -163,7 +163,7 @@ awslocal lambda create-function \ After successfully deploying the Lambda function, you will receive a response with the details of the function. You can now invoke the function using the `awslocal` CLI: -```bash +```bash showLineNumbers awslocal lambda invoke --function-name localstack-snowflake-lambda-example \ --payload '{"body": "test" }' output.txt ``` diff --git a/src/content/docs/snowflake/tutorials/credit-scoring-with-localstack-snowpark.md b/src/content/docs/snowflake/tutorials/credit-scoring-with-localstack-snowpark.md index 38a645e1..146ab67d 100644 --- a/src/content/docs/snowflake/tutorials/credit-scoring-with-localstack-snowpark.md +++ b/src/content/docs/snowflake/tutorials/credit-scoring-with-localstack-snowpark.md @@ -36,7 +36,7 @@ The next step is to configure the Snowflake emulator. The Snowflake emulator run Start Jupyter Notebook and create a new notebook. Add the following code to connect to the Snowflake emulator: -```python +```python showLineNumbers from snowflake.snowpark import * from snowflake.snowpark.functions import * @@ -53,7 +53,7 @@ session = Session.builder.configs(connection_parameters).create() In the above configuration, you can set `user`, `password`, `account`, and `warehouse` as `test` to avoid passing any production values. You can now run Snowflake SQL queries on your local machine. 
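Before creating the tutorial tables, a quick round-trip query confirms that the session is actually talking to the emulator. The check below is a minimal sketch using the `session` object created above:

```python showLineNumbers
# Trivial query that should return a single row from the emulator
print(session.sql("SELECT 1 AS ok").collect())
```

Once that returns a row, you can create the tutorial database and switch to its schema: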
-```python +```python showLineNumbers session.sql("create or replace database credit_bank").collect() session.sql("use schema credit_bank.public").collect() print(session.sql("select current_warehouse(), current_database(), current_schema(), current_user(), current_role()").collect()) @@ -61,7 +61,7 @@ print(session.sql("select current_warehouse(), current_database(), current_schem -```bash +```bash showLineNumbers [Row(?COLUMN?='TEST', CURRENT_DATABASE='CREDIT_BANK', CURRENT_SCHEMA='public', ?COLUMN?='TEST', GET_CURRENT_ROLE='PUBLIC')] ``` @@ -74,7 +74,7 @@ You can now create two tables associated with this tutorial: Run the following code to create the `credit_df` table: -```python +```python showLineNumbers import pandas as pd credit_files = pd.read_csv('credit_files.csv') session.write_pandas(credit_files,"CREDIT_FILES",auto_create_table='True') @@ -90,7 +90,7 @@ StructType([StructField('CREDIT_REQUEST_ID', LongType(), nullable=True), StructF In a similar fashion, you can create the `credit_req_df` table: -```python +```python showLineNumbers credit_requests = pd.read_csv('credit_request.csv') session.write_pandas(credit_requests,"CREDIT_REQUESTS",auto_create_table='True') credit_req_df = session.table("CREDIT_REQUESTS") @@ -131,7 +131,7 @@ credit_df.toPandas().hist(figsize=(15,15)) You can also visualize the categorical features of the `credit_df` table: -```python +```python showLineNumbers import matplotlib.pyplot as plt import seaborn as sns