From 5cca5545e2fa8882bdcd49c907d348d499cb8215 Mon Sep 17 00:00:00 2001 From: HarshCasper Date: Thu, 19 Jun 2025 23:01:02 +0530 Subject: [PATCH 01/10] get the structure right --- src/components/DynamicTutorials.astro | 38 ++ .../tutorials/TutorialsShowcase.tsx | 603 ++++++++++++++++++ src/components/tutorials/types.ts | 17 + src/content/docs/aws/tutorials.md | 10 - .../docs/aws/tutorials/elb-load-balancing.mdx | 389 +++++++++++ src/content/docs/aws/tutorials/index.mdx | 11 + .../tutorials/s3-static-website-terraform.mdx | 386 +++++++++++ 7 files changed, 1444 insertions(+), 10 deletions(-) create mode 100644 src/components/DynamicTutorials.astro create mode 100644 src/components/tutorials/TutorialsShowcase.tsx create mode 100644 src/components/tutorials/types.ts delete mode 100644 src/content/docs/aws/tutorials.md create mode 100644 src/content/docs/aws/tutorials/elb-load-balancing.mdx create mode 100644 src/content/docs/aws/tutorials/index.mdx create mode 100644 src/content/docs/aws/tutorials/s3-static-website-terraform.mdx diff --git a/src/components/DynamicTutorials.astro b/src/components/DynamicTutorials.astro new file mode 100644 index 00000000..fadb0dd9 --- /dev/null +++ b/src/components/DynamicTutorials.astro @@ -0,0 +1,38 @@ +--- +import { getCollection } from 'astro:content'; +import { TutorialsShowcase } from './tutorials/TutorialsShowcase'; + +// Import data for filters +import services from '../data/developerhub/services.json'; +import platforms from '../data/developerhub/platforms.json'; +import deployments from '../data/developerhub/deployments.json'; + +const allTutorials = await getCollection('docs', ({ id }) => { + return id.startsWith('aws/tutorials/') && !id.includes('/index'); +}); + +const tutorialData = allTutorials.map(tutorial => { + const title = tutorial.data.title || 'Unknown Tutorial'; + const description = tutorial.data.description || `Tutorial: ${title}`; + const slug = tutorial.slug ? 
tutorial.slug.replace('aws/tutorials/', '') : ''; + + return { + title, + description, + slug, + leadimage: tutorial.data.leadimage, + services: tutorial.data.services || [], + platform: tutorial.data.platform || [], + deployment: tutorial.data.deployment || [], + pro: tutorial.data.pro || false, + }; +}); +--- + + \ No newline at end of file diff --git a/src/components/tutorials/TutorialsShowcase.tsx b/src/components/tutorials/TutorialsShowcase.tsx new file mode 100644 index 00000000..6903f698 --- /dev/null +++ b/src/components/tutorials/TutorialsShowcase.tsx @@ -0,0 +1,603 @@ +import React, { useState, useMemo } from 'react'; + +interface Tutorial { + title: string; + description: string; + slug: string; + leadimage?: string; + services: string[]; + platform: string[]; + deployment: string[]; + pro: boolean; +} + +interface FilterState { + services: string[]; + platforms: string[]; + deployments: string[]; + showProOnly: boolean; +} + +interface TutorialsShowcaseProps { + tutorials: Tutorial[]; + services: Record; + platforms: Record; + deployments: Record; +} + +const TutorialCard: React.FC<{ + tutorial: Tutorial; + services: Record; + platforms: Record; + deployments: Record; +}> = ({ tutorial, services, platforms, deployments }) => { + const imagePath = tutorial.leadimage ? `/images/aws/${tutorial.leadimage}` : '/images/aws/banner.png'; + + return ( +
+
+ {tutorial.title} +
+ {tutorial.pro && Pro} +
+
+ +
+

{tutorial.title}

+

{tutorial.description}

+ +
+
+ {tutorial.services.slice(0, 8).map((serviceCode) => ( +
+ {services[serviceCode] +
+ ))} + {tutorial.services.length > 8 && ( +
+{tutorial.services.length - 8}
+ )} +
+ + + Read Tutorial → + +
+
+
+ ); +}; + +export const TutorialsShowcase: React.FC = ({ + tutorials, + services, + platforms, + deployments, +}) => { + const [filters, setFilters] = useState({ + services: [], + platforms: [], + deployments: [], + showProOnly: false, + }); + + const [searchTerm, setSearchTerm] = useState(''); + const [sortBy, setSortBy] = useState<'title' | 'services'>('title'); + + // Get unique values for filters + const uniqueServices = useMemo(() => { + const allServices = new Set(tutorials.flatMap(tutorial => tutorial.services)); + return Array.from(allServices).sort((a, b) => (services[a] || a).localeCompare(services[b] || b)); + }, [tutorials, services]); + + const uniquePlatforms = useMemo(() => { + const allPlatforms = new Set(tutorials.flatMap(tutorial => tutorial.platform)); + return Array.from(allPlatforms).sort((a, b) => (platforms[a] || a).localeCompare(platforms[b] || b)); + }, [tutorials, platforms]); + + const uniqueDeployments = useMemo(() => { + const allDeployments = new Set(tutorials.flatMap(tutorial => tutorial.deployment)); + return Array.from(allDeployments).sort((a, b) => (deployments[a] || a).localeCompare(deployments[b] || b)); + }, [tutorials, deployments]); + + // Filter and sort tutorials + const filteredTutorials = useMemo(() => { + let filtered = tutorials.filter(tutorial => { + // Search filter + if (searchTerm) { + const searchLower = searchTerm.toLowerCase(); + const matchesSearch = + tutorial.title.toLowerCase().includes(searchLower) || + tutorial.description.toLowerCase().includes(searchLower) || + tutorial.services.some(service => (services[service] || service).toLowerCase().includes(searchLower)) || + tutorial.platform.some(platform => (platforms[platform] || platform).toLowerCase().includes(searchLower)); + if (!matchesSearch) return false; + } + + // Other filters + if (filters.services.length > 0 && !filters.services.some(service => tutorial.services.includes(service))) return false; + if (filters.platforms.length > 0 && 
!filters.platforms.some(platform => tutorial.platform.includes(platform))) return false; + if (filters.deployments.length > 0 && !filters.deployments.some(deployment => tutorial.deployment.includes(deployment))) return false; + if (filters.showProOnly && !tutorial.pro) return false; + + return true; + }); + + // Sort tutorials + return filtered.sort((a, b) => { + if (sortBy === 'title') { + return a.title.localeCompare(b.title); + } else { + return b.services.length - a.services.length; // Sort by number of services + } + }); + }, [tutorials, filters, searchTerm, sortBy, services, platforms]); + + const toggleFilter = (filterType: keyof FilterState, item: string) => { + if (filterType === 'showProOnly') return; + + setFilters(prev => ({ + ...prev, + [filterType]: prev[filterType].includes(item) + ? prev[filterType].filter(i => i !== item) + : [...prev[filterType], item] + })); + }; + + const clearAllFilters = () => { + setFilters({ + services: [], + platforms: [], + deployments: [], + showProOnly: false, + }); + setSearchTerm(''); + }; + + const hasActiveFilters = filters.services.length > 0 || + filters.platforms.length > 0 || + filters.deployments.length > 0 || + filters.showProOnly || + searchTerm.length > 0; + + return ( + <> + + +
+
+
+ setSearchTerm(e.target.value)} + className="search-input" + /> + {searchTerm && ( + + )} +
+ + + + + + + + + + + + {hasActiveFilters && ( + + )} +
+ +
+ {filteredTutorials.length} tutorial{filteredTutorials.length !== 1 ? 's' : ''} +
+ +
+ {filteredTutorials.map((tutorial, index) => ( + + ))} + + {filteredTutorials.length === 0 && ( +
+

No tutorials found

+

Try adjusting your search or filters.

+ +
+ )} +
+
+ + ); +}; diff --git a/src/components/tutorials/types.ts b/src/components/tutorials/types.ts new file mode 100644 index 00000000..d2191df0 --- /dev/null +++ b/src/components/tutorials/types.ts @@ -0,0 +1,17 @@ +export interface Tutorial { + title: string; + description: string; + slug: string; + leadimage?: string; + services: string[]; + deployment: string[]; + platform: string[]; + pro: boolean; +} + +export interface TutorialFilterState { + services: string[]; + platforms: string[]; + deployments: string[]; + showProOnly: boolean; +} \ No newline at end of file diff --git a/src/content/docs/aws/tutorials.md b/src/content/docs/aws/tutorials.md deleted file mode 100644 index 6719b15b..00000000 --- a/src/content/docs/aws/tutorials.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Tutorials -description: This is a dummy description -template: doc -sidebar: - order: 9 ---- - -# Tutorials -These tutorials enhance your comprehension of LocalStack’s functionality by providing detailed information on how it works for specific use cases using diverse resources. These guides help you set up and build your local cloud development & testing environment with the help of LocalStack, using local AWS services, integrations, and tools, helping create a mental model for how LocalStack works. For community contributed tutorials, check out the LocalStack Community Tutorials. \ No newline at end of file diff --git a/src/content/docs/aws/tutorials/elb-load-balancing.mdx b/src/content/docs/aws/tutorials/elb-load-balancing.mdx new file mode 100644 index 00000000..64acf699 --- /dev/null +++ b/src/content/docs/aws/tutorials/elb-load-balancing.mdx @@ -0,0 +1,389 @@ +--- +title: "Setting up Elastic Load Balancing (ELB) Application Load Balancers using LocalStack, deployed via the Serverless framework" +description: Learn how to configure Elastic Load Balancing (ELB) Application Load Balancers and set up Node.js Lambda functions as targets. 
This tutorial demonstrates how to forward requests to the target group for your Lambda function using the Serverless Framework and the `serverless-localstack` plugin to effortlessly deploy and manage your infrastructure locally with LocalStack. +services: +- elb +- lmb +platform: +- JavaScript +deployment: +- serverless +pro: true +leadimage: "elb-load-balancing-featured-image.png" +--- + +[Elastic Load Balancer (ELB)](https://aws.amazon.com/elasticloadbalancing/) is a service that distributes incoming application traffic across multiple targets, such as EC2 instances, containers, IP addresses, and Lambda functions. +ELBs can be physical hardware or virtual software components. +They accept incoming traffic and distribute it across multiple targets in one or more Availability Zones. +Using ELB, you can quickly scale your load balancer to accommodate changes in traffic over time, ensuring optimal performance for your application and workloads running on the AWS infrastructure. + +ELB provides four types of load balancers: [Application Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html), [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html), [Gateway Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/gateway/introduction.html), and [Classic Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/introduction.html). + +In this tutorial we focus on the Application Load Balancer (ALB), which operates at the Application layer of the OSI model and is specifically designed for load balancing HTTP and HTTPS traffic for web applications. +ALB works at the request level, allowing advanced load-balancing features for HTTP and HTTPS requests. +It also enables you to register Lambda functions as targets. 
+You can configure a listener rule that forwards requests to a target group for your Lambda function, triggering its execution to process the request. + +[LocalStack Pro](https://localstack.cloud) extends support for ELB Application Load Balancers and the configuration of target groups, including Lambda functions. +This tutorial will guide you through setting up an ELB Application Load Balancer to configure Node.js Lambda functions as targets. +We will utilize the [Serverless framework](http://serverless.com/) along with the [`serverless-localstack` plugin](https://www.serverless.com/plugins/serverless-localstack) to simplify the setup. +Additionally, we will demonstrate how to set up ELB endpoints to efficiently forward requests to the target group associated with your Lambda functions. + +## Prerequisites + +- LocalStack Pro +- [Serverless framework](https://www.serverless.com/framework/docs/getting-started/) +- [Node.js & `npm`](https://nodejs.org/en/download/) +- [awslocal](https://github.com/localstack/awscli-local) +- [curl](https://curl.se/) and [jq](https://jqlang.github.io/jq/) + +## Setup a Serverless project + +Serverless is an open-source framework that enables you to build, package, and deploy serverless applications seamlessly across various cloud providers and platforms. +With the Serverless framework, you can easily set up your serverless development environment, define your applications as functions and events, and deploy your entire infrastructure to the cloud using a single command. +To start using the Serverless framework, install the Serverless framework globally by executing the following command using `npm`: + +```bash +$ npm install -g serverless +``` + +The above command installs the Serverless framework globally on your machine. 
+After the installation is complete, you can verify it by running the following command: + +```bash +serverless --version + +Framework Core: 3.24.1 +Plugin: 6.2.2 +SDK: 4.3.2 +``` + +This command displays the version numbers of the Serverless framework's core, plugins, and SDK you installed. +Now, let's proceed with creating a new Serverless project using the `serverless` command: + +```bash +serverless create --template aws-nodejs --path serverless-elb +``` + +In this example, we use the `aws-nodejs` template to create our Serverless project. +This template includes a simple Node.js Lambda function that returns a message when invoked. +It also generates a `serverless.yml` file that contains the project's configuration. + +The `serverless.yml` file is where you configure your project. +It includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. +If you prefer to set up your project using a different template, refer to the [Serverless templates documentation](https://www.serverless.com/framework/docs/providers/aws/cli-reference/create/) for more options. + +Now that we have created our Serverless project, we can proceed to configure it to use LocalStack. + +## Configure Serverless project to use LocalStack + +To configure your Serverless project to use LocalStack, you need to install the `serverless-localstack` plugin. +Before that, let's initialize the project and install some dependencies: + +```bash +npm init -y +npm install -D serverless serverless-localstack serverless-deployment-bucket +``` + +In the above commands, we use `npm init -y` to initialize a new Node.js project with default settings and then install the necessary dependencies, including `serverless`, `serverless-localstack`, and `serverless-deployment-bucket`, as dev dependencies. 
+ +The `serverless-localstack` plugin enables your Serverless project to redirect AWS API calls to LocalStack, while the `serverless-deployment-bucket` plugin creates a deployment bucket in LocalStack. +This bucket is responsible for storing the deployment artifacts and ensuring that old deployment buckets are properly cleaned up after each deployment. + +We have a `serverless.yml` file in the directory to define our Serverless project's configuration, which includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. +To set up the plugins we installed earlier, you need to add the following properties to your `serverless.yml` file: + +```yaml +service: serverless-elb + +frameworkVersion: '3' + +provider: + name: aws + runtime: nodejs12.x + + +functions: + hello: + handler: handler.hello + +plugins: + - serverless-deployment-bucket + - serverless-localstack + +custom: + localstack: + stages: + - local +``` + +To configure Serverless to use the LocalStack plugin specifically for the `local` stage and ensure that your Serverless project only deploys to LocalStack instead of the real AWS Cloud, you need to set the `--stage` flag when using the `serverless deploy` command and specify the flag variable as `local`. + +Configure a `deploy` script in your `package.json` file to simplify the deployment process. +It lets you run the `serverless deploy` command directly over your local infrastructure. 
+Update your `package.json` file to include the following: + +```json +{ + "name": "serverless-elb", + "version": "1.0.0", + "description": "", + "main": "handler.js", + "scripts": { + "deploy": "sls deploy --stage local" + }, + "keywords": [], + "author": "", + "license": "ISC", + "devDependencies": { + "serverless": "^3.25.0", + "serverless-deployment-bucket": "^1.6.0", + "serverless-localstack": "^1.0.1" + } +} +``` + +With this configuration, you can now run the deployment script using: + +```bash +npm run deploy +``` + +This will execute the `serverless deploy --stage local` command, deploying your Serverless project to LocalStack. + +## Create Lambda functions & ELB Application Load Balancers + +Now, let's create two Lambda functions named `hello1` and `hello2` that will run on the Node.js 12.x runtime. +Open the `handler.js` file and replace the existing code with the following: + +```js +'use strict'; + +module.exports.hello1 = async (event) => { + console.log(event); + return { + "isBase64Encoded": false, + "statusCode": 200, + "statusDescription": "200 OK", + "headers": { + "Content-Type": "text/plain" + }, + "body": "Hello 1" + }; +}; + +module.exports.hello2 = async (event) => { + console.log(event); + return { + "isBase64Encoded": false, + "statusCode": 200, + "statusDescription": "200 OK", + "headers": { + "Content-Type": "text/plain" + }, + "body": "Hello 2" + }; +}; +``` + +We have defined the `hello1` and `hello2` Lambda functions in the updated code. +Each function receives an event parameter and logs it to the console. +The function then returns a response with a status code of 200 and a plain text body containing the respective `"Hello"` message. +It's important to note that the `isBase64Encoded` property is not required for plain text responses. +It is typically used when you need to include binary content in the response body and want to indicate that the content is Base64 encoded. 
+ +Let us now configure the `serverless.yml` file to create an Application Load Balancer (ALB) and attach the Lambda functions to it. + +```yaml +service: serverless-elb + +provider: + name: aws + runtime: nodejs12.x + deploymentBucket: + name: testbucket + +functions: + hello1: + handler: handler.hello1 + events: + - alb: + listenerArn: !Ref HTTPListener + priority: 1 + conditions: + path: /hello1 + hello2: + handler: handler.hello2 + events: + - alb: + listenerArn: !Ref HTTPListener + priority: 2 + conditions: + path: /hello2 + +plugins: + - serverless-deployment-bucket + - serverless-localstack + +custom: + localstack: + stages: + - local +``` + +In the above configuration, we specify the service name (`serverless-elb` in this case) and set the provider to AWS with the Node.js 12.x runtime. +We include the necessary plugins, `serverless-localstack` and `serverless-deployment-bucket`, for LocalStack support and deployment bucket management. +Next, we define the `hello1` and `hello2` functions with their respective handlers and event triggers. +In this example, both functions are triggered by HTTP GET requests to the `/hello1` and `/hello2` paths. + +Lastly, let's create a VPC, a subnet, an Application Load Balancer, and an HTTP listener on the load balancer that redirects traffic to the target group. +To do this, add the following resources to your `serverless.yml` file: + +```yaml +... 
+resources: + Resources: + LoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + Properties: + Name: lb-test-1 + Subnets: + - !Ref Subnet + HTTPListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Port: 443 + Host: "#{host}" + LoadBalancerArn: !Ref LoadBalancer + Protocol: HTTP + Subnet: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 12.2.1.0/24 + AvailabilityZone: !Select + - 0 + - Fn::GetAZs: !Ref "AWS::Region" + VPC: + Type: AWS::EC2::VPC + Properties: + EnableDnsSupport: "true" + EnableDnsHostnames: "true" + CidrBlock: 12.2.1.0/24 +``` + +With these resource definitions, you have completed the configuration of your Serverless project. +Now you can create your local AWS infrastructure on LocalStack and deploy your Application Load Balancers with the two Lambda functions as targets. + +## Creating the infrastructure on LocalStack + +Now that we have completed the initial setup let's run LocalStack's AWS emulation on our local machine. +Start LocalStack by running the following command: + +```bash +LOCALSTACK_AUTH_TOKEN= localstack start -d +``` + +This command launches LocalStack in the background, enabling you to use the AWS services locally. +Now, let's deploy our Serverless project and verify the resources created in LocalStack. +Run the following command: + +```bash +npm run deploy +``` + +This command deploys your Serverless project using the "local" stage. +The output will resemble the following: + +```bash +> serverless-elb@1.0.0 deploy +> sls deploy --stage local + +Using serverless-localstack + +Deploying test-elb-load-balancing to stage local (us-east-1) +Creating deployment bucket 'testbucket'... 
+Using deployment bucket 'testbucket' +Skipping template validation: Unsupported in Localstack + +✔ Service deployed to stack test-elb-load-balancing-local (15s) + +functions: + hello1: test-elb-load-balancing-local-hello1 (157 kB) + hello2: test-elb-load-balancing-local-hello2 (157 kB) +``` + +This output confirms the successful deployment of your Serverless service to the `local` stage in LocalStack. +It also displays information about the deployed Lambda functions (`hello1` and `hello2`). +You can run the following command to verify that the functions and the load balancers have been deployed: + +```bash +awslocal lambda list-functions +{ + "Functions": [ + { + "FunctionName": "test-elb-load-balancing-local-hello1", + "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:test-elb-load-balancing-local-hello1", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::000000000000:role/test-elb-load-balancing-local-us-east-1-lambdaRole", + "Handler": "handler.hello1", + ... + }, + { + "FunctionName": "test-elb-load-balancing-local-hello2", + "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:test-elb-load-balancing-local-hello2", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::000000000000:role/test-elb-load-balancing-local-us-east-1-lambdaRole", + "Handler": "handler.hello2", + ... + } + ] +} + +$ awslocal elbv2 describe-load-balancers +{ + "LoadBalancers": [ + { + "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-east-1:000000000000:loadbalancer/app/lb-test-1/", + "DNSName": "lb-test-1.elb.localhost.localstack.cloud", + "CanonicalHostedZoneId": "", + "CreatedTime": "", + "LoadBalancerName": "lb-test-1", + "Scheme": "None", + ... 
+ } + ] +} +``` + +The ALB endpoints for the two Lambda functions, hello1 and hello2, are accessible at the following URLs: + +- [`http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1`](http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1) +- [`http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2`](http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2) + +To test these endpoints, you can use the curl command along with the jq tool for better formatting. +Run the following commands: + +```bash +curl http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1 | jq +"Hello 1" +curl http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2 | jq +"Hello 2" +``` + +Both commands send an HTTP GET request to the endpoints and uses `jq` to format the response. +The expected outputs are `Hello 1` & `Hello 2`, representing the Lambda functions' response. + +## Conclusion + +In this tutorial, we have learned how to create an Application Load Balancer (ALB) with two Lambda functions as targets using LocalStack. +We have also explored creating, configuring, and deploying a Serverless project with LocalStack. +This enables developers to develop and test Cloud and Serverless applications locally conveniently. + +LocalStack offers integrations with various popular tools such as Terraform, Pulumi, Serverless Application Model (SAM), and more. +For more information about LocalStack integrations, you can refer to our [Integration documentation](). +To further explore and experiment with the concepts covered in this tutorial, you can access the code and resources on our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/elb-load-balancing) along with a `Makefile` for step-by-step execution. 
diff --git a/src/content/docs/aws/tutorials/index.mdx b/src/content/docs/aws/tutorials/index.mdx new file mode 100644 index 00000000..615e670a --- /dev/null +++ b/src/content/docs/aws/tutorials/index.mdx @@ -0,0 +1,11 @@ +--- +title: Tutorials +description: These tutorials enhance your comprehension of LocalStack's functionality by providing detailed information on how it works for specific use cases using diverse resources. +template: doc +sidebar: + order: 9 +--- + +import DynamicTutorials from '../../../../components/DynamicTutorials.astro'; + + diff --git a/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx new file mode 100644 index 00000000..16446e69 --- /dev/null +++ b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx @@ -0,0 +1,386 @@ +--- +title: "Host a static website locally using Simple Storage Service (S3) and Terraform with LocalStack" +description: > + Host a static website using a Simple Storage Service (S3) bucket to serve static content by provisioning the infrastructure using Terraform in LocalStack. Learn how to configure S3 buckets locally for testing and integration, and make use of LocalStack's S3 API & `tflocal` CLI to provision infrastructure locally. +services: +- s3 +platform: +- html +deployment: +- terraform +pro: false +leadimage: "s3-static-website-terraform-featured-image.png" +--- + +[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is a proprietary object storage solution that can store an unlimited number of objects for many use cases. +S3 is a highly scalable, durable and reliable service that we can use for various use cases: hosting a static site, handling big data analytics, managing application logs, storing web assets and much more! + +With S3, you have unlimited storage with your data stored in buckets. +A bucket refers to a directory, while an object is just another term for a file. 
+Every object (file) stores the name of the file (key), the contents (value), a version ID and the associated metadata. +You can also use S3 to host a static website, to serve static content. +It might include HTML, CSS, JavaScript, images, and other assets that make up your website. + +LocalStack supports the S3 API, which means you can use the same API calls to interact with S3 in LocalStack as you would with AWS. +Using LocalStack, you can create and manage S3 buckets and objects locally, use AWS SDKs and third-party integrations to work with S3, and test your applications without making any significant alterations. +LocalStack also supports the creation of S3 buckets with static website hosting enabled. + +In this tutorial, we will deploy a static website using an S3 bucket over a locally emulated AWS infrastructure on LocalStack. +We will use Terraform to automate the creation & management of AWS resources by declaring them in the HashiCorp Configuration Language (HCL). +We will also learn about `tflocal`, a CLI wrapper created by LocalStack, that allows you to run Terraform locally against LocalStack. + +## Prerequisites + +For this tutorial, you will need: + +- [LocalStack Community](https://github.com/localstack/localstack) +- [Terraform](https://www.terraform.io/downloads.html) +- [awslocal](https://github.com/localstack/awscli-local) + +## Creating a static website + +We will create a simple static website using plain HTML to get started. +To create a static website deployed over S3, we need to create an index document and a custom error document. +We will name our index document `index.html` and our error document `error.html`. +Optionally, you can create a folder called `assets` to store images and other assets. + +Let's create a directory named `s3-static-website-localstack` where we'll store our static website files. +If you don't have an `index.html` file, you can use the following code to create one: + +```html + + + + + + Static Website + + +

Static Website deployed locally over S3 using LocalStack

+ + +``` + +S3 will serve this file when a user visits the root URL of your static website, serving as the default page. +In a similar fashion, you can configure a custom error document that contains a user-friendly error message. +Let's create a file named `error.html` and add the following code: + +```html + + + + + 404 + + +

Something is amiss.

+ + +``` + +S3 will return the above file content only for HTTP 4XX error codes. +Some browsers might choose to display their custom error message if a user tries to access a resource that does not exist. +In this case, browsers might ignore the above error document. +With the initial setup complete, we can now move on to creating a static website using S3 via `awslocal`, LocalStack's wrapper for the AWS CLI. + +## Hosting a static website using S3 + +To create a static website using S3, we need to create a bucket, enable static website hosting, and upload the files to the bucket. +We will use the `awslocal` CLI for these operations. +Navigate to the root directory of the project and create a bucket named `testwebsite` using LocalStack's S3 API: + +```bash +awslocal s3api create-bucket --bucket testwebsite +``` + +With the bucket created, we can now attach a policy to it to allow public access and its contents. +Let's create a file named `bucket_policy.json` in the root directory and add the following code: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "PublicReadGetObject", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::testwebsite/*" + } + ] +} +``` + +Let's now attach the policy to the bucket: + +```bash +awslocal s3api put-bucket-policy --bucket testwebsite --policy file://bucket_policy.json +``` + +With the policy attached, we can now sync the contents of our root directory to the bucket: + +```bash +awslocal s3 sync ./ s3://testwebsite +``` + +We'll now enable static website hosting on the bucket and configure the index and error documents: + +```bash +awslocal s3 website s3://testwebsite/ --index-document index.html --error-document error.html +``` + +If you are deploying a static website using S3 on real AWS cloud, your S3 website endpoint will follow one of these two formats: + +- `http://.s3-website-.amazonaws.com` +- `http://.s3-website..amazonaws.com` + +In LocalStack, the S3 
website endpoint follows the following format: `http://.s3-website.localhost.localstack.cloud:4566`. +You can navigate to [`http://testwebsite.s3-website.localhost.localstack.cloud:4566/`](http://testwebsite.s3-website.localhost.localstack.cloud:4566/) to view your static website. + +## Orchestrating infrastructure using Terraform + +You can automate the above process by orchestrating your AWS infrastructure using Terraform. +Terraform is an infrastructure as code (IaC) tool that allows you to create, manage, and version your infrastructure. +Terraform uses a declarative configuration language called HashiCorp Configuration Language (HCL) to describe your infrastructure. + +Before that, we would need to manually configure the local service endpoints and credentials for Terraform to integrate with LocalStack. +We will use the [AWS Provider for Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to interact with the many resources supported by AWS in LocalStack. +Create a new file named `provider.tf` and specify mock credentials for the AWS provider: + +```hcl +provider "aws" { + region = "us-east-1" + access_key = "fake" + secret_key = "fake" +} +``` + +We would also need to avoid issues with routing and authentication (as we do not need it). +Therefore we need to supply some general parameters. +Additionally, we have to point the individual services to LocalStack. +We can do this by specifying the `endpoints` parameter for each service, that we intend to use. +Our `provider.tf` file should look like this: + +```hcl +provider "aws" { + access_key = "test" + secret_key = "test" + region = "us-east-1" + + # only required for non virtual hosted-style endpoint use case. 
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs#s3_force_path_style + s3_use_path_style = false + skip_credentials_validation = true + skip_metadata_api_check = true + skip_requesting_account_id = true + + endpoints { + s3 = "http://s3.localhost.localstack.cloud:4566" + } +} +``` + +:::note +We use `localhost.localstack.cloud` as the recommended endpoint for the S3 to enable host-based bucket endpoints. +Users can rely on the `localhost.localstack.cloud` domain to be publicly resolvable. +We also publish an SSL certificate which is automatically used inside LocalStack to enable HTTPS endpoints with valid certificates. +For most of the other services, it is fine to use `localhost:4566`. +::: + +With the provider configured, we can now configure the variables for our S3 bucket. +Create a new file named `variables.tf` and add the following code: + +```hcl +variable "bucket_name" { + description = "Name of the s3 bucket. Must be unique." + type = string +} + +variable "tags" { + description = "Tags to set on the bucket." + type = map(string) + default = {} +} +``` + +We take a user input for the bucket name and tags. +Next, we will define the output variables for our Terraform configuration. +Create a new file named `outputs.tf` and add the following code: + +```hcl +output "arn" { + description = "ARN of the bucket" + value = aws_s3_bucket.s3_bucket.arn +} + +output "name" { + description = "Name (id) of the bucket" + value = aws_s3_bucket.s3_bucket.id +} + +output "domain" { + description = "Domain name of the bucket" + value = aws_s3_bucket_website_configuration.s3_bucket.website_domain +} + +output "website_endpoint" { + value = aws_s3_bucket_website_configuration.s3_bucket.website_endpoint +} +``` + +The output variables are the ARN, name, domain name, and website endpoint of the bucket. +With all the configuration files in place, we can now create the S3 bucket. 
+Create a new file named `main.tf` and create the S3 bucket using the following code: + +```hcl +resource "aws_s3_bucket" "s3_bucket" { + bucket = var.bucket_name + tags = var.tags +} +``` + +To configure the static website hosting, we will use the `aws_s3_bucket_website_configuration` resource. +Add the following code to the `main.tf` file: + +```hcl +resource "aws_s3_bucket_website_configuration" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + + index_document { + suffix = "index.html" + } + + error_document { + key = "error.html" + } + +} +``` + +To set the bucket policy, we will use the `aws_s3_bucket_policy` resource. +Add the following code to the `main.tf` file: + +```hcl +resource "aws_s3_bucket_acl" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + acl = "public-read" +} + +resource "aws_s3_bucket_policy" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "PublicReadGetObject" + Effect = "Allow" + Principal = "*" + Action = "s3:GetObject" + Resource = [ + aws_s3_bucket.s3_bucket.arn, + "${aws_s3_bucket.s3_bucket.arn}/*", + ] + }, + ] + }) +} +``` + +In the above code, we are setting the ACL of the bucket to `public-read` and setting the bucket policy to allow public access to the bucket. +Pick up an appropriate policy based on your use case. +Let's use the `aws_s3_object` resource to upload the files to the bucket. +Add the following code to the `main.tf` file: + +```hcl +resource "aws_s3_object" "object_www" { + depends_on = [aws_s3_bucket.s3_bucket] + for_each = fileset("${path.root}", "*.html") + bucket = var.bucket_name + key = basename(each.value) + source = each.value + etag = filemd5("${each.value}") + content_type = "text/html" + acl = "public-read" +} +``` + +The above code uploads all our html files to the bucket. +We are also setting the ACL of the files to `public-read`. 
+Optionally, if you have static assets like images, CSS, and JavaScript files, you can upload them to the bucket using the same `aws_s3_object` resource by adding the following code to the `main.tf` file:
+
+```hcl
+resource "aws_s3_object" "object_assets" {
+  depends_on = [aws_s3_bucket.s3_bucket]
+  for_each = fileset(path.module, "assets/*")
+  bucket = var.bucket_name
+  key = each.value
+  source = "${each.value}"
+  etag = filemd5("${each.value}")
+  acl = "public-read"
+}
+```
+
+With all the configuration files in place, we can now initialize the Terraform configuration.
+Run the following command to initialize the Terraform configuration:
+
+```bash
+terraform init
+
+...
+Terraform has been successfully initialized!
+...
+```
+
+We can create an execution plan based on our Terraform configuration for the AWS resources.
+Run the following command to create an execution plan:
+
+```bash
+terraform plan
+```
+
+Finally, we can apply the Terraform configuration to create the AWS resources.
+Run the following command to apply the Terraform configuration:
+
+```bash
+terraform apply
+
+var.bucket_name
+  Name of the s3 bucket.
+Must be unique.
+
+  Enter a value: testbucket
+...
+arn = "arn:aws:s3:::testbucket"
+domain = "s3-website-us-east-1.amazonaws.com"
+name = "testbucket"
+website_endpoint = "testbucket.s3-website-us-east-1.amazonaws.com"
+```
+
+In the above command, we specified `testbucket` as the bucket name.
+You can specify any bucket name since LocalStack is ephemeral, and stopping your LocalStack container will delete all the created resources.
+The above command output includes the ARN, name, domain name, and website endpoint of the bucket.
+You can see the `website_endpoint` configured to use AWS S3 Website Endpoint.
+You can now access the website using the bucket name in the following format: `http://<bucket-name>.s3-website.localhost.localstack.cloud:4566`.
+Since the endpoint is configured to use `localhost.localstack.cloud`, no real AWS resources have been created. + +You can optionally use the `tflocal` CLI as a drop-in replacement for the official Terraform CLI. `tflocal` uses the Terraform Override mechanism to create a temporary `localstack_providers_override.tf` file, which is deleted after the infrastructure is created. +It mitigates the need to create the `provider.tf` file manually. +You can use `tflocal` to create the infrastructure by running the following commands: + +```bash +tflocal init +tflocal plan +tflocal apply +``` + +## Conclusion + +In this tutorial, we have seen how to use LocalStack to create an S3 bucket and configure it to serve a static website. +We have also seen how you can use Terraform to provision AWS infrastructure in an emulated local environment using LocalStack. +You can use the [LocalStack App](https://app.localstack.cloud) to view the created buckets and files on the LocalStack Resource dashboard for S3 and upload more files or perform other operations on the bucket. +Using LocalStack, you can perform various operations using emulated S3 buckets and other AWS services without creating any real AWS resources. + +The code for this tutorial can be found in our [LocalStack Terraform samples over GitHub](https://github.com/localstack/localstack-terraform-samples/tree/master/s3-static-website). +Please make sure to adjust the paths for the HTML files in `main.tf`. +Further documentation for S3 is available on our [S3 documentation](/aws/services/s3). 
From 4ef52faf9b78e9980a7a4367d3d5dd40bda5b82d Mon Sep 17 00:00:00 2001 From: HarshCasper Date: Thu, 19 Jun 2025 23:09:16 +0530 Subject: [PATCH 02/10] fix issue with opening the tutorial --- src/components/DynamicTutorials.astro | 2 +- src/components/tutorials/TutorialsShowcase.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/components/DynamicTutorials.astro b/src/components/DynamicTutorials.astro index fadb0dd9..7fe4b204 100644 --- a/src/components/DynamicTutorials.astro +++ b/src/components/DynamicTutorials.astro @@ -14,7 +14,7 @@ const allTutorials = await getCollection('docs', ({ id }) => { const tutorialData = allTutorials.map(tutorial => { const title = tutorial.data.title || 'Unknown Tutorial'; const description = tutorial.data.description || `Tutorial: ${title}`; - const slug = tutorial.slug ? tutorial.slug.replace('aws/tutorials/', '') : ''; + const slug = tutorial.id; // Use id instead of slug for proper path return { title, diff --git a/src/components/tutorials/TutorialsShowcase.tsx b/src/components/tutorials/TutorialsShowcase.tsx index 6903f698..4dd7a07c 100644 --- a/src/components/tutorials/TutorialsShowcase.tsx +++ b/src/components/tutorials/TutorialsShowcase.tsx @@ -62,7 +62,7 @@ const TutorialCard: React.FC<{ Read Tutorial → From 437df5325080423a4a7738e502212ed58f12dd4d Mon Sep 17 00:00:00 2001 From: HarshCasper Date: Thu, 19 Jun 2025 23:20:40 +0530 Subject: [PATCH 03/10] make filtering work --- .../tutorials/TutorialsShowcase.tsx | 21 ++++++++++++++----- src/content.config.ts | 15 +++++++++++-- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/components/tutorials/TutorialsShowcase.tsx b/src/components/tutorials/TutorialsShowcase.tsx index 4dd7a07c..b11a4597 100644 --- a/src/components/tutorials/TutorialsShowcase.tsx +++ b/src/components/tutorials/TutorialsShowcase.tsx @@ -96,7 +96,9 @@ export const TutorialsShowcase: React.FC = ({ }, [tutorials, services]); const uniquePlatforms = useMemo(() => { - 
const allPlatforms = new Set(tutorials.flatMap(tutorial => tutorial.platform)); + const allPlatforms = new Set(tutorials.flatMap(tutorial => + tutorial.platform.map(p => p.toLowerCase()) // Convert to lowercase + )); return Array.from(allPlatforms).sort((a, b) => (platforms[a] || a).localeCompare(platforms[b] || b)); }, [tutorials, platforms]); @@ -121,7 +123,7 @@ export const TutorialsShowcase: React.FC = ({ // Other filters if (filters.services.length > 0 && !filters.services.some(service => tutorial.services.includes(service))) return false; - if (filters.platforms.length > 0 && !filters.platforms.some(platform => tutorial.platform.includes(platform))) return false; + if (filters.platforms.length > 0 && !filters.platforms.some(platform => tutorial.platform.map(p => p.toLowerCase()).includes(platform))) return false; if (filters.deployments.length > 0 && !filters.deployments.some(deployment => tutorial.deployment.includes(deployment))) return false; if (filters.showProOnly && !tutorial.pro) return false; @@ -510,7 +512,10 @@ export const TutorialsShowcase: React.FC = ({ e.target.value ? toggleFilter('platforms', e.target.value) : null} + onChange={(e) => setFilters(prev => ({ + ...prev, + platforms: e.target.value ? [e.target.value] : [] + }))} className="filter-select" > @@ -536,7 +544,10 @@ export const TutorialsShowcase: React.FC = ({ setSearchTerm(e.target.value)} + className="search-input" + /> + {searchTerm && ( + + )} + + + + + + + + + + + + + {hasActiveFilters && ( + + )} + + +
+ {filteredTutorials.length} tutorial{filteredTutorials.length !== 1 ? 's' : ''} +
+ +
+ {filteredTutorials.map((tutorial, index) => ( + + ))} + + {filteredTutorials.length === 0 && ( +
+

No tutorials found

+

Try adjusting your search or filters.

+ +
+ )} +
+ + + ); +}; diff --git a/src/components/tutorials/types.ts b/src/components/tutorials/types.ts new file mode 100644 index 00000000..d2191df0 --- /dev/null +++ b/src/components/tutorials/types.ts @@ -0,0 +1,17 @@ +export interface Tutorial { + title: string; + description: string; + slug: string; + leadimage?: string; + services: string[]; + deployment: string[]; + platform: string[]; + pro: boolean; +} + +export interface TutorialFilterState { + services: string[]; + platforms: string[]; + deployments: string[]; + showProOnly: boolean; +} \ No newline at end of file diff --git a/src/content/docs/aws/tutorials.md b/src/content/docs/aws/tutorials.md deleted file mode 100644 index 6719b15b..00000000 --- a/src/content/docs/aws/tutorials.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Tutorials -description: This is a dummy description -template: doc -sidebar: - order: 9 ---- - -# Tutorials -These tutorials enhance your comprehension of LocalStack’s functionality by providing detailed information on how it works for specific use cases using diverse resources. These guides help you set up and build your local cloud development & testing environment with the help of LocalStack, using local AWS services, integrations, and tools, helping create a mental model for how LocalStack works. For community contributed tutorials, check out the LocalStack Community Tutorials. \ No newline at end of file diff --git a/src/content/docs/aws/tutorials/elb-load-balancing.mdx b/src/content/docs/aws/tutorials/elb-load-balancing.mdx new file mode 100644 index 00000000..64acf699 --- /dev/null +++ b/src/content/docs/aws/tutorials/elb-load-balancing.mdx @@ -0,0 +1,389 @@ +--- +title: "Setting up Elastic Load Balancing (ELB) Application Load Balancers using LocalStack, deployed via the Serverless framework" +description: Learn how to configure Elastic Load Balancing (ELB) Application Load Balancers and set up Node.js Lambda functions as targets. 
This tutorial demonstrates how to forward requests to the target group for your Lambda function using the Serverless Framework and the `serverless-localstack` plugin to effortlessly deploy and manage your infrastructure locally with LocalStack.
+services:
+- elb
+- lmb
+platform:
+- JavaScript
+deployment:
+- serverless
+pro: true
+leadimage: "elb-load-balancing-featured-image.png"
+---
+
+[Elastic Load Balancer (ELB)](https://aws.amazon.com/elasticloadbalancing/) is a service that distributes incoming application traffic across multiple targets, such as EC2 instances, containers, IP addresses, and Lambda functions.
+ELBs can be physical hardware or virtual software components.
+They accept incoming traffic and distribute it across multiple targets in one or more Availability Zones.
+Using ELB, you can quickly scale your load balancer to accommodate changes in traffic over time, ensuring optimal performance for your application and workloads running on the AWS infrastructure.
+
+ELB provides four types of load balancers: [Application Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html), [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html), [Gateway Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/gateway/introduction.html), and [Classic Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/introduction.html).
+
+In this tutorial, we focus on the Application Load Balancer (ALB), which operates at the Application layer of the OSI model and is specifically designed for load balancing HTTP and HTTPS traffic for web applications.
+ALB works at the request level, allowing advanced load-balancing features for HTTP and HTTPS requests.
+It also enables you to register Lambda functions as targets.
+You can configure a listener rule that forwards requests to a target group for your Lambda function, triggering its execution to process the request. + +[LocalStack Pro](https://localstack.cloud) extends support for ELB Application Load Balancers and the configuration of target groups, including Lambda functions. +This tutorial will guide you through setting up an ELB Application Load Balancer to configure Node.js Lambda functions as targets. +We will utilize the [Serverless framework](http://serverless.com/) along with the [`serverless-localstack` plugin](https://www.serverless.com/plugins/serverless-localstack) to simplify the setup. +Additionally, we will demonstrate how to set up ELB endpoints to efficiently forward requests to the target group associated with your Lambda functions. + +## Prerequisites + +- LocalStack Pro +- [Serverless framework](https://www.serverless.com/framework/docs/getting-started/) +- [Node.js & `npm`](https://nodejs.org/en/download/) +- [awslocal](https://github.com/localstack/awscli-local) +- [curl](https://curl.se/) and [jq](https://jqlang.github.io/jq/) + +## Setup a Serverless project + +Serverless is an open-source framework that enables you to build, package, and deploy serverless applications seamlessly across various cloud providers and platforms. +With the Serverless framework, you can easily set up your serverless development environment, define your applications as functions and events, and deploy your entire infrastructure to the cloud using a single command. +To start using the Serverless framework, install the Serverless framework globally by executing the following command using `npm`: + +```bash +$ npm install -g serverless +``` + +The above command installs the Serverless framework globally on your machine. 
+After the installation is complete, you can verify it by running the following command: + +```bash +serverless --version + +Framework Core: 3.24.1 +Plugin: 6.2.2 +SDK: 4.3.2 +``` + +This command displays the version numbers of the Serverless framework's core, plugins, and SDK you installed. +Now, let's proceed with creating a new Serverless project using the `serverless` command: + +```bash +serverless create --template aws-nodejs --path serverless-elb +``` + +In this example, we use the `aws-nodejs` template to create our Serverless project. +This template includes a simple Node.js Lambda function that returns a message when invoked. +It also generates a `serverless.yml` file that contains the project's configuration. + +The `serverless.yml` file is where you configure your project. +It includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. +If you prefer to set up your project using a different template, refer to the [Serverless templates documentation](https://www.serverless.com/framework/docs/providers/aws/cli-reference/create/) for more options. + +Now that we have created our Serverless project, we can proceed to configure it to use LocalStack. + +## Configure Serverless project to use LocalStack + +To configure your Serverless project to use LocalStack, you need to install the `serverless-localstack` plugin. +Before that, let's initialize the project and install some dependencies: + +```bash +npm init -y +npm install -D serverless serverless-localstack serverless-deployment-bucket +``` + +In the above commands, we use `npm init -y` to initialize a new Node.js project with default settings and then install the necessary dependencies, including `serverless`, `serverless-localstack`, and `serverless-deployment-bucket`, as dev dependencies. 
+ +The `serverless-localstack` plugin enables your Serverless project to redirect AWS API calls to LocalStack, while the `serverless-deployment-bucket` plugin creates a deployment bucket in LocalStack. +This bucket is responsible for storing the deployment artifacts and ensuring that old deployment buckets are properly cleaned up after each deployment. + +We have a `serverless.yml` file in the directory to define our Serverless project's configuration, which includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. +To set up the plugins we installed earlier, you need to add the following properties to your `serverless.yml` file: + +```yaml +service: serverless-elb + +frameworkVersion: '3' + +provider: + name: aws + runtime: nodejs12.x + + +functions: + hello: + handler: handler.hello + +plugins: + - serverless-deployment-bucket + - serverless-localstack + +custom: + localstack: + stages: + - local +``` + +To configure Serverless to use the LocalStack plugin specifically for the `local` stage and ensure that your Serverless project only deploys to LocalStack instead of the real AWS Cloud, you need to set the `--stage` flag when using the `serverless deploy` command and specify the flag variable as `local`. + +Configure a `deploy` script in your `package.json` file to simplify the deployment process. +It lets you run the `serverless deploy` command directly over your local infrastructure. 
+Update your `package.json` file to include the following: + +```json +{ + "name": "serverless-elb", + "version": "1.0.0", + "description": "", + "main": "handler.js", + "scripts": { + "deploy": "sls deploy --stage local" + }, + "keywords": [], + "author": "", + "license": "ISC", + "devDependencies": { + "serverless": "^3.25.0", + "serverless-deployment-bucket": "^1.6.0", + "serverless-localstack": "^1.0.1" + } +} +``` + +With this configuration, you can now run the deployment script using: + +```bash +npm run deploy +``` + +This will execute the `serverless deploy --stage local` command, deploying your Serverless project to LocalStack. + +## Create Lambda functions & ELB Application Load Balancers + +Now, let's create two Lambda functions named `hello1` and `hello2` that will run on the Node.js 12.x runtime. +Open the `handler.js` file and replace the existing code with the following: + +```js +'use strict'; + +module.exports.hello1 = async (event) => { + console.log(event); + return { + "isBase64Encoded": false, + "statusCode": 200, + "statusDescription": "200 OK", + "headers": { + "Content-Type": "text/plain" + }, + "body": "Hello 1" + }; +}; + +module.exports.hello2 = async (event) => { + console.log(event); + return { + "isBase64Encoded": false, + "statusCode": 200, + "statusDescription": "200 OK", + "headers": { + "Content-Type": "text/plain" + }, + "body": "Hello 2" + }; +}; +``` + +We have defined the `hello1` and `hello2` Lambda functions in the updated code. +Each function receives an event parameter and logs it to the console. +The function then returns a response with a status code of 200 and a plain text body containing the respective `"Hello"` message. +It's important to note that the `isBase64Encoded` property is not required for plain text responses. +It is typically used when you need to include binary content in the response body and want to indicate that the content is Base64 encoded. 
+ +Let us now configure the `serverless.yml` file to create an Application Load Balancer (ALB) and attach the Lambda functions to it. + +```yaml +service: serverless-elb + +provider: + name: aws + runtime: nodejs12.x + deploymentBucket: + name: testbucket + +functions: + hello1: + handler: handler.hello1 + events: + - alb: + listenerArn: !Ref HTTPListener + priority: 1 + conditions: + path: /hello1 + hello2: + handler: handler.hello2 + events: + - alb: + listenerArn: !Ref HTTPListener + priority: 2 + conditions: + path: /hello2 + +plugins: + - serverless-deployment-bucket + - serverless-localstack + +custom: + localstack: + stages: + - local +``` + +In the above configuration, we specify the service name (`serverless-elb` in this case) and set the provider to AWS with the Node.js 12.x runtime. +We include the necessary plugins, `serverless-localstack` and `serverless-deployment-bucket`, for LocalStack support and deployment bucket management. +Next, we define the `hello1` and `hello2` functions with their respective handlers and event triggers. +In this example, both functions are triggered by HTTP GET requests to the `/hello1` and `/hello2` paths. + +Lastly, let's create a VPC, a subnet, an Application Load Balancer, and an HTTP listener on the load balancer that redirects traffic to the target group. +To do this, add the following resources to your `serverless.yml` file: + +```yaml +... 
+resources: + Resources: + LoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + Properties: + Name: lb-test-1 + Subnets: + - !Ref Subnet + HTTPListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Port: 443 + Host: "#{host}" + LoadBalancerArn: !Ref LoadBalancer + Protocol: HTTP + Subnet: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 12.2.1.0/24 + AvailabilityZone: !Select + - 0 + - Fn::GetAZs: !Ref "AWS::Region" + VPC: + Type: AWS::EC2::VPC + Properties: + EnableDnsSupport: "true" + EnableDnsHostnames: "true" + CidrBlock: 12.2.1.0/24 +``` + +With these resource definitions, you have completed the configuration of your Serverless project. +Now you can create your local AWS infrastructure on LocalStack and deploy your Application Load Balancers with the two Lambda functions as targets. + +## Creating the infrastructure on LocalStack + +Now that we have completed the initial setup let's run LocalStack's AWS emulation on our local machine. +Start LocalStack by running the following command: + +```bash +LOCALSTACK_AUTH_TOKEN= localstack start -d +``` + +This command launches LocalStack in the background, enabling you to use the AWS services locally. +Now, let's deploy our Serverless project and verify the resources created in LocalStack. +Run the following command: + +```bash +npm run deploy +``` + +This command deploys your Serverless project using the "local" stage. +The output will resemble the following: + +```bash +> serverless-elb@1.0.0 deploy +> sls deploy --stage local + +Using serverless-localstack + +Deploying test-elb-load-balancing to stage local (us-east-1) +Creating deployment bucket 'testbucket'... 
+Using deployment bucket 'testbucket'
+Skipping template validation: Unsupported in Localstack
+
+✔ Service deployed to stack test-elb-load-balancing-local (15s)
+
+functions:
+  hello1: test-elb-load-balancing-local-hello1 (157 kB)
+  hello2: test-elb-load-balancing-local-hello2 (157 kB)
+```
+
+This output confirms the successful deployment of your Serverless service to the `local` stage in LocalStack.
+It also displays information about the deployed Lambda functions (`hello1` and `hello2`).
+You can run the following command to verify that the functions and the load balancers have been deployed:
+
+```bash
+awslocal lambda list-functions
+{
+    "Functions": [
+        {
+            "FunctionName": "test-elb-load-balancing-local-hello1",
+            "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:test-elb-load-balancing-local-hello1",
+            "Runtime": "nodejs12.x",
+            "Role": "arn:aws:iam::000000000000:role/test-elb-load-balancing-local-us-east-1-lambdaRole",
+            "Handler": "handler.hello1",
+            ...
+        },
+        {
+            "FunctionName": "test-elb-load-balancing-local-hello2",
+            "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:test-elb-load-balancing-local-hello2",
+            "Runtime": "nodejs12.x",
+            "Role": "arn:aws:iam::000000000000:role/test-elb-load-balancing-local-us-east-1-lambdaRole",
+            "Handler": "handler.hello2",
+            ...
+        }
+    ]
+}
+
+$ awslocal elbv2 describe-load-balancers
+{
+    "LoadBalancers": [
+        {
+            "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-east-1:000000000000:loadbalancer/app/lb-test-1/",
+            "DNSName": "lb-test-1.elb.localhost.localstack.cloud",
+            "CanonicalHostedZoneId": "",
+            "CreatedTime": "",
+            "LoadBalancerName": "lb-test-1",
+            "Scheme": "None",
+            ...
+        }
+    ]
+}
+```
+
+The ALB endpoints for the two Lambda functions, hello1 and hello2, are accessible at the following URLs:
+
+- [`http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1`](http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1)
+- [`http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2`](http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2)
+
+To test these endpoints, you can use the curl command along with the jq tool for better formatting.
+Run the following commands:
+
+```bash
+curl http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1 | jq
+"Hello 1"
+curl http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2 | jq
+"Hello 2"
+```
+
+Both commands send an HTTP GET request to the endpoints and use `jq` to format the response.
+The expected outputs are `Hello 1` & `Hello 2`, representing the Lambda functions' response.
+
+## Conclusion
+
+In this tutorial, we have learned how to create an Application Load Balancer (ALB) with two Lambda functions as targets using LocalStack.
+We have also explored creating, configuring, and deploying a Serverless project with LocalStack.
+This enables developers to develop and test Cloud and Serverless applications locally conveniently.
+
+LocalStack offers integrations with various popular tools such as Terraform, Pulumi, Serverless Application Model (SAM), and more.
+For more information about LocalStack integrations, you can refer to our [Integration documentation](/aws/integrations/).
+To further explore and experiment with the concepts covered in this tutorial, you can access the code and resources on our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/elb-load-balancing) along with a `Makefile` for step-by-step execution.
diff --git a/src/content/docs/aws/tutorials/index.mdx b/src/content/docs/aws/tutorials/index.mdx new file mode 100644 index 00000000..615e670a --- /dev/null +++ b/src/content/docs/aws/tutorials/index.mdx @@ -0,0 +1,11 @@ +--- +title: Tutorials +description: These tutorials enhance your comprehension of LocalStack's functionality by providing detailed information on how it works for specific use cases using diverse resources. +template: doc +sidebar: + order: 9 +--- + +import DynamicTutorials from '../../../../components/DynamicTutorials.astro'; + + diff --git a/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx new file mode 100644 index 00000000..16446e69 --- /dev/null +++ b/src/content/docs/aws/tutorials/s3-static-website-terraform.mdx @@ -0,0 +1,386 @@ +--- +title: "Host a static website locally using Simple Storage Service (S3) and Terraform with LocalStack" +description: > + Host a static website using a Simple Storage Service (S3) bucket to serve static content by provisioning the infrastructure using Terraform in LocalStack. Learn how to configure S3 buckets locally for testing and integration, and make use of LocalStack's S3 API & `tflocal` CLI to provision infrastructure locally. +services: +- s3 +platform: +- html +deployment: +- terraform +pro: false +leadimage: "s3-static-website-terraform-featured-image.png" +--- + +[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is a proprietary object storage solution that can store an unlimited number of objects for many use cases. +S3 is a highly scalable, durable and reliable service that we can use for various use cases: hosting a static site, handling big data analytics, managing application logs, storing web assets and much more! + +With S3, you have unlimited storage with your data stored in buckets. +A bucket refers to a directory, while an object is just another term for a file. 
+Every object (file) stores the name of the file (key), the contents (value), a version ID and the associated metadata. +You can also use S3 to host a static website, to serve static content. +It might include HTML, CSS, JavaScript, images, and other assets that make up your website. + +LocalStack supports the S3 API, which means you can use the same API calls to interact with S3 in LocalStack as you would with AWS. +Using LocalStack, you can create and manage S3 buckets and objects locally, use AWS SDKs and third-party integrations to work with S3, and test your applications without making any significant alterations. +LocalStack also supports the creation of S3 buckets with static website hosting enabled. + +In this tutorial, we will deploy a static website using an S3 bucket over a locally emulated AWS infrastructure on LocalStack. +We will use Terraform to automate the creation & management of AWS resources by declaring them in the HashiCorp Configuration Language (HCL). +We will also learn about `tflocal`, a CLI wrapper created by LocalStack, that allows you to run Terraform locally against LocalStack. + +## Prerequisites + +For this tutorial, you will need: + +- [LocalStack Community](https://github.com/localstack/localstack) +- [Terraform](https://www.terraform.io/downloads.html) +- [awslocal](https://github.com/localstack/awscli-local) + +## Creating a static website + +We will create a simple static website using plain HTML to get started. +To create a static website deployed over S3, we need to create an index document and a custom error document. +We will name our index document `index.html` and our error document `error.html`. +Optionally, you can create a folder called `assets` to store images and other assets. + +Let's create a directory named `s3-static-website-localstack` where we'll store our static website files. +If you don't have an `index.html` file, you can use the following code to create one: + +```html + + + + + + Static Website + + +

Static Website deployed locally over S3 using LocalStack

+ + +``` + +S3 will serve this file when a user visits the root URL of your static website, serving as the default page. +In a similar fashion, you can configure a custom error document that contains a user-friendly error message. +Let's create a file named `error.html` and add the following code: + +```html + + + + + 404 + + +

Something is amiss.

+ + +``` + +S3 will return the above file content only for HTTP 4XX error codes. +Some browsers might choose to display their custom error message if a user tries to access a resource that does not exist. +In this case, browsers might ignore the above error document. +With the initial setup complete, we can now move on to creating a static website using S3 via `awslocal`, LocalStack's wrapper for the AWS CLI. + +## Hosting a static website using S3 + +To create a static website using S3, we need to create a bucket, enable static website hosting, and upload the files to the bucket. +We will use the `awslocal` CLI for these operations. +Navigate to the root directory of the project and create a bucket named `testwebsite` using LocalStack's S3 API: + +```bash +awslocal s3api create-bucket --bucket testwebsite +``` + +With the bucket created, we can now attach a policy to it to allow public access and its contents. +Let's create a file named `bucket_policy.json` in the root directory and add the following code: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "PublicReadGetObject", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::testwebsite/*" + } + ] +} +``` + +Let's now attach the policy to the bucket: + +```bash +awslocal s3api put-bucket-policy --bucket testwebsite --policy file://bucket_policy.json +``` + +With the policy attached, we can now sync the contents of our root directory to the bucket: + +```bash +awslocal s3 sync ./ s3://testwebsite +``` + +We'll now enable static website hosting on the bucket and configure the index and error documents: + +```bash +awslocal s3 website s3://testwebsite/ --index-document index.html --error-document error.html +``` + +If you are deploying a static website using S3 on real AWS cloud, your S3 website endpoint will follow one of these two formats: + +- `http://.s3-website-.amazonaws.com` +- `http://.s3-website..amazonaws.com` + +In LocalStack, the S3 
website endpoint follows the following format: `http://.s3-website.localhost.localstack.cloud:4566`. +You can navigate to [`http://testwebsite.s3-website.localhost.localstack.cloud:4566/`](http://testwebsite.s3-website.localhost.localstack.cloud:4566/) to view your static website. + +## Orchestrating infrastructure using Terraform + +You can automate the above process by orchestrating your AWS infrastructure using Terraform. +Terraform is an infrastructure as code (IaC) tool that allows you to create, manage, and version your infrastructure. +Terraform uses a declarative configuration language called HashiCorp Configuration Language (HCL) to describe your infrastructure. + +Before that, we would need to manually configure the local service endpoints and credentials for Terraform to integrate with LocalStack. +We will use the [AWS Provider for Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to interact with the many resources supported by AWS in LocalStack. +Create a new file named `provider.tf` and specify mock credentials for the AWS provider: + +```hcl +provider "aws" { + region = "us-east-1" + access_key = "fake" + secret_key = "fake" +} +``` + +We would also need to avoid issues with routing and authentication (as we do not need it). +Therefore we need to supply some general parameters. +Additionally, we have to point the individual services to LocalStack. +We can do this by specifying the `endpoints` parameter for each service, that we intend to use. +Our `provider.tf` file should look like this: + +```hcl +provider "aws" { + access_key = "test" + secret_key = "test" + region = "us-east-1" + + # only required for non virtual hosted-style endpoint use case. 
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs#s3_force_path_style + s3_use_path_style = false + skip_credentials_validation = true + skip_metadata_api_check = true + skip_requesting_account_id = true + + endpoints { + s3 = "http://s3.localhost.localstack.cloud:4566" + } +} +``` + +:::note +We use `localhost.localstack.cloud` as the recommended endpoint for the S3 to enable host-based bucket endpoints. +Users can rely on the `localhost.localstack.cloud` domain to be publicly resolvable. +We also publish an SSL certificate which is automatically used inside LocalStack to enable HTTPS endpoints with valid certificates. +For most of the other services, it is fine to use `localhost:4566`. +::: + +With the provider configured, we can now configure the variables for our S3 bucket. +Create a new file named `variables.tf` and add the following code: + +```hcl +variable "bucket_name" { + description = "Name of the s3 bucket. Must be unique." + type = string +} + +variable "tags" { + description = "Tags to set on the bucket." + type = map(string) + default = {} +} +``` + +We take a user input for the bucket name and tags. +Next, we will define the output variables for our Terraform configuration. +Create a new file named `outputs.tf` and add the following code: + +```hcl +output "arn" { + description = "ARN of the bucket" + value = aws_s3_bucket.s3_bucket.arn +} + +output "name" { + description = "Name (id) of the bucket" + value = aws_s3_bucket.s3_bucket.id +} + +output "domain" { + description = "Domain name of the bucket" + value = aws_s3_bucket_website_configuration.s3_bucket.website_domain +} + +output "website_endpoint" { + value = aws_s3_bucket_website_configuration.s3_bucket.website_endpoint +} +``` + +The output variables are the ARN, name, domain name, and website endpoint of the bucket. +With all the configuration files in place, we can now create the S3 bucket. 
+Create a new file named `main.tf` and create the S3 bucket using the following code: + +```hcl +resource "aws_s3_bucket" "s3_bucket" { + bucket = var.bucket_name + tags = var.tags +} +``` + +To configure the static website hosting, we will use the `aws_s3_bucket_website_configuration` resource. +Add the following code to the `main.tf` file: + +```hcl +resource "aws_s3_bucket_website_configuration" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + + index_document { + suffix = "index.html" + } + + error_document { + key = "error.html" + } + +} +``` + +To set the bucket policy, we will use the `aws_s3_bucket_policy` resource. +Add the following code to the `main.tf` file: + +```hcl +resource "aws_s3_bucket_acl" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + acl = "public-read" +} + +resource "aws_s3_bucket_policy" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "PublicReadGetObject" + Effect = "Allow" + Principal = "*" + Action = "s3:GetObject" + Resource = [ + aws_s3_bucket.s3_bucket.arn, + "${aws_s3_bucket.s3_bucket.arn}/*", + ] + }, + ] + }) +} +``` + +In the above code, we are setting the ACL of the bucket to `public-read` and setting the bucket policy to allow public access to the bucket. +Pick up an appropriate policy based on your use case. +Let's use the `aws_s3_object` resource to upload the files to the bucket. +Add the following code to the `main.tf` file: + +```hcl +resource "aws_s3_object" "object_www" { + depends_on = [aws_s3_bucket.s3_bucket] + for_each = fileset("${path.root}", "*.html") + bucket = var.bucket_name + key = basename(each.value) + source = each.value + etag = filemd5("${each.value}") + content_type = "text/html" + acl = "public-read" +} +``` + +The above code uploads all our html files to the bucket. +We are also setting the ACL of the files to `public-read`. 
+Optionally, if you have static assets like images, CSS, and JavaScript files, you can upload them to the bucket using the same `aws_s3_bucket_object` resource by adding the following code to the `main.tf` file: + +```hcl +resource "aws_s3_object" "object_assets" { + depends_on = [aws_s3_bucket.s3_bucket] + for_each = fileset(path.module, "assets/*") + bucket = var.bucket_name + key = each.value + source = "${each.value}" + etag = filemd5("${each.value}") + acl = "public-read" +} +``` + +With all the configuration files in place, we can now initialize the Terraform configuration. +Run the following command to initialize the Terraform configuration: + +```bash +terraform init + +... +Terraform has been successfully initialized! +... +``` + +We can create an execution plan based on our Terraform configuration for the AWS resources. +Run the following command to create an execution plan: + +```bash +terraform plan +``` + +Finally, we can apply the Terraform configuration to create the AWS resources. +Run the following command to apply the Terraform configuration: + +```bash +terraform apply + +var.bucket_name + Name of the s3 bucket. +Must be unique. + + Enter a value: testbucket +... +arn = "arn:aws:s3:::testbucket" +domain = "s3-website-us-east-1.amazonaws.com" +name = "testbucket" +website_endpoint = "testbucket.s3-website-us-east-1.amazonaws.com" +``` + +In the above command, we specified `testbucket` as the bucket name. +You can specify any bucket name since LocalStack is ephemeral, and stopping your LocalStack container will delete all the created resources. +The above command output includes the ARN, name, domain name, and website endpoint of the bucket. +You can see the `website_endpoint` configured to use AWS S3 Website Endpoint. +You can now access the website using the bucket name in the following format: `http://.s3-website.localhost.localstack.cloud:4566`. 
+Since the endpoint is configured to use `localhost.localstack.cloud`, no real AWS resources have been created. + +You can optionally use the `tflocal` CLI as a drop-in replacement for the official Terraform CLI. `tflocal` uses the Terraform Override mechanism to create a temporary `localstack_providers_override.tf` file, which is deleted after the infrastructure is created. +It mitigates the need to create the `provider.tf` file manually. +You can use `tflocal` to create the infrastructure by running the following commands: + +```bash +tflocal init +tflocal plan +tflocal apply +``` + +## Conclusion + +In this tutorial, we have seen how to use LocalStack to create an S3 bucket and configure it to serve a static website. +We have also seen how you can use Terraform to provision AWS infrastructure in an emulated local environment using LocalStack. +You can use the [LocalStack App](https://app.localstack.cloud) to view the created buckets and files on the LocalStack Resource dashboard for S3 and upload more files or perform other operations on the bucket. +Using LocalStack, you can perform various operations using emulated S3 buckets and other AWS services without creating any real AWS resources. + +The code for this tutorial can be found in our [LocalStack Terraform samples over GitHub](https://github.com/localstack/localstack-terraform-samples/tree/master/s3-static-website). +Please make sure to adjust the paths for the HTML files in `main.tf`. +Further documentation for S3 is available on our [S3 documentation](/aws/services/s3). 
From d738770bfee999e097b4eca9bdae39e8e12b31fd Mon Sep 17 00:00:00 2001 From: HarshCasper Date: Thu, 19 Jun 2025 23:09:16 +0530 Subject: [PATCH 06/10] fix issue with opening the tutorial --- src/components/DynamicTutorials.astro | 2 +- src/components/tutorials/TutorialsShowcase.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/components/DynamicTutorials.astro b/src/components/DynamicTutorials.astro index fadb0dd9..7fe4b204 100644 --- a/src/components/DynamicTutorials.astro +++ b/src/components/DynamicTutorials.astro @@ -14,7 +14,7 @@ const allTutorials = await getCollection('docs', ({ id }) => { const tutorialData = allTutorials.map(tutorial => { const title = tutorial.data.title || 'Unknown Tutorial'; const description = tutorial.data.description || `Tutorial: ${title}`; - const slug = tutorial.slug ? tutorial.slug.replace('aws/tutorials/', '') : ''; + const slug = tutorial.id; // Use id instead of slug for proper path return { title, diff --git a/src/components/tutorials/TutorialsShowcase.tsx b/src/components/tutorials/TutorialsShowcase.tsx index 6903f698..4dd7a07c 100644 --- a/src/components/tutorials/TutorialsShowcase.tsx +++ b/src/components/tutorials/TutorialsShowcase.tsx @@ -62,7 +62,7 @@ const TutorialCard: React.FC<{
Read Tutorial → From 9fbc7481cdedbdfa5c41d640593588a33cc22f38 Mon Sep 17 00:00:00 2001 From: HarshCasper Date: Thu, 19 Jun 2025 23:20:40 +0530 Subject: [PATCH 07/10] make filtering work --- .../tutorials/TutorialsShowcase.tsx | 21 ++++++++++++++----- src/content.config.ts | 15 +++++++++++-- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/components/tutorials/TutorialsShowcase.tsx b/src/components/tutorials/TutorialsShowcase.tsx index 4dd7a07c..b11a4597 100644 --- a/src/components/tutorials/TutorialsShowcase.tsx +++ b/src/components/tutorials/TutorialsShowcase.tsx @@ -96,7 +96,9 @@ export const TutorialsShowcase: React.FC = ({ }, [tutorials, services]); const uniquePlatforms = useMemo(() => { - const allPlatforms = new Set(tutorials.flatMap(tutorial => tutorial.platform)); + const allPlatforms = new Set(tutorials.flatMap(tutorial => + tutorial.platform.map(p => p.toLowerCase()) // Convert to lowercase + )); return Array.from(allPlatforms).sort((a, b) => (platforms[a] || a).localeCompare(platforms[b] || b)); }, [tutorials, platforms]); @@ -121,7 +123,7 @@ export const TutorialsShowcase: React.FC = ({ // Other filters if (filters.services.length > 0 && !filters.services.some(service => tutorial.services.includes(service))) return false; - if (filters.platforms.length > 0 && !filters.platforms.some(platform => tutorial.platform.includes(platform))) return false; + if (filters.platforms.length > 0 && !filters.platforms.some(platform => tutorial.platform.map(p => p.toLowerCase()).includes(platform))) return false; if (filters.deployments.length > 0 && !filters.deployments.some(deployment => tutorial.deployment.includes(deployment))) return false; if (filters.showProOnly && !tutorial.pro) return false; @@ -510,7 +512,10 @@ export const TutorialsShowcase: React.FC = ({ e.target.value ? toggleFilter('platforms', e.target.value) : null} + onChange={(e) => setFilters(prev => ({ + ...prev, + platforms: e.target.value ? 
[e.target.value] : [] + }))} className="filter-select" > @@ -536,7 +544,10 @@ export const TutorialsShowcase: React.FC = ({