version: '3.8'

services:
  localstack:
    container_name: localstack_main
    image: localstack/localstack:latest
    ports:
      - "4510-4599:4510-4599"  # For legacy ports
      - "4566:4566"            # New default port
    environment:
      - SERVICES=s3,dynamodb,lambda,sqs,sns  # Specify services
      - DOCKER_HOST=unix:///var/run/docker.sock
      # - LOCALSTACK_API_KEY=YOUR_KEY_HERE (if you want to try free tier after registering)
    volumes:
      - "./.localstack:/var/lib/localstack"
      # Mount the Docker socket so LocalStack can spawn Lambda containers
      - "/var/run/docker.sock:/var/run/docker.sock"
version: '3.8'

services:
  localstack:
    container_name: localstack_main
    image: localstack/localstack:latest
    ports:
      - "4510-4599:4510-4599"  # For legacy ports
      - "4566:4566"            # New default port
    environment:
      - SERVICES=s3,dynamodb,lambda,sqs,sns  # Specify services
      - DOCKER_HOST=unix:///var/run/docker.sock
      # - LOCALSTACK_API_KEY=YOUR_KEY_HERE (if you want to try free tier after registering)
    volumes:
      - "./.localstack:/var/lib/localstack"
      # Mount the Docker socket so LocalStack can spawn Lambda containers
      - "/var/run/docker.sock:/var/run/docker.sock"
version: '3.8'

services:
  localstack:
    container_name: localstack_main
    image: localstack/localstack:latest
    ports:
      - "4510-4599:4510-4599"  # For legacy ports
      - "4566:4566"            # New default port
    environment:
      - SERVICES=s3,dynamodb,lambda,sqs,sns  # Specify services
      - DOCKER_HOST=unix:///var/run/docker.sock
      # - LOCALSTACK_API_KEY=YOUR_KEY_HERE (if you want to try free tier after registering)
    volumes:
      - "./.localstack:/var/lib/localstack"
      # Mount the Docker socket so LocalStack can spawn Lambda containers
      - "/var/run/docker.sock:/var/run/docker.sock"
version: '3.8'

services:
  # DynamoDB Local
  dynamodb-local:
    image: amazon/dynamodb-local:latest
    container_name: dynamodb_local
    ports:
      - "8000:8000"
    command: "-jar DynamoDBLocal.jar -sharedDb -dbPath /home/dynamodblocal/data"
    volumes:
      - ./dynamodbd-data:/home/dynamodblocal/data
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8000 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  # MinIO for S3
  minio:
    image: minio/minio
    container_name: minio_s3
    ports:
      - "9000:9000"  # API port
      - "9001:9001"  # Console port
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
    command: server /data --console-address ":9001"
    volumes:
      - ./minio-data:/data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Moto for other services (SQS, SNS, etc.)
  moto:
    image: ghcr.io/getmoto/moto:latest  # Using a community Moto image
    container_name: moto_server
    ports:
      - "5000:5000"
    environment:
      # Specify services (Moto can handle S3, but MinIO is often better for direct S3)
      MOTO_SERVICES: "s3:5000,sqs:5000,sns:5000,lambda:5000"
      # MOTO_HOST: 0.0.0.0  # Often not needed if image is configured correctly
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/moto-api || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
version: '3.8'

services:
  # DynamoDB Local
  dynamodb-local:
    image: amazon/dynamodb-local:latest
    container_name: dynamodb_local
    ports:
      - "8000:8000"
    command: "-jar DynamoDBLocal.jar -sharedDb -dbPath /home/dynamodblocal/data"
    volumes:
      - ./dynamodbd-data:/home/dynamodblocal/data
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8000 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  # MinIO for S3
  minio:
    image: minio/minio
    container_name: minio_s3
    ports:
      - "9000:9000"  # API port
      - "9001:9001"  # Console port
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
    command: server /data --console-address ":9001"
    volumes:
      - ./minio-data:/data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Moto for other services (SQS, SNS, etc.)
  moto:
    image: ghcr.io/getmoto/moto:latest  # Using a community Moto image
    container_name: moto_server
    ports:
      - "5000:5000"
    environment:
      # Specify services (Moto can handle S3, but MinIO is often better for direct S3)
      MOTO_SERVICES: "s3:5000,sqs:5000,sns:5000,lambda:5000"
      # MOTO_HOST: 0.0.0.0  # Often not needed if image is configured correctly
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/moto-api || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
version: '3.8'

services:
  # DynamoDB Local
  dynamodb-local:
    image: amazon/dynamodb-local:latest
    container_name: dynamodb_local
    ports:
      - "8000:8000"
    command: "-jar DynamoDBLocal.jar -sharedDb -dbPath /home/dynamodblocal/data"
    volumes:
      - ./dynamodbd-data:/home/dynamodblocal/data
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8000 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  # MinIO for S3
  minio:
    image: minio/minio
    container_name: minio_s3
    ports:
      - "9000:9000"  # API port
      - "9001:9001"  # Console port
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
    command: server /data --console-address ":9001"
    volumes:
      - ./minio-data:/data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Moto for other services (SQS, SNS, etc.)
  moto:
    image: ghcr.io/getmoto/moto:latest  # Using a community Moto image
    container_name: moto_server
    ports:
      - "5000:5000"
    environment:
      # Specify services (Moto can handle S3, but MinIO is often better for direct S3)
      MOTO_SERVICES: "s3:5000,sqs:5000,sns:5000,lambda:5000"
      # MOTO_HOST: 0.0.0.0  # Often not needed if image is configured correctly
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/moto-api || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
// src/awsConfig.js
const AWS = require('aws-sdk'); const isLocal = process.env.NODE_ENV === 'development' || process.env.IS_OFFLINE; const getAwsConfig = (serviceName) => { if (!isLocal) { return {}; // Use default AWS configuration (production) } // Configuration for local development switch (serviceName) { case 'S3': return { endpoint: 'http://localhost:9000', // MinIO endpoint s3ForcePathStyle: true, // Required for MinIO accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin', region: 'us-east-1', // Can be any region for local }; case 'DynamoDB': return { endpoint: 'http://localhost:8000', // DynamoDB Local endpoint region: 'us-east-1', // Can be any region for local accessKeyId: 'fakeAccessKey', // Not used by DynamoDB Local, but SDK needs it secretAccessKey: 'fakeSecretKey', // Same here }; case 'SQS': case 'SNS': case 'Lambda': return { endpoint: 'http://localhost:5000', // Moto endpoint region: 'us-east-1', accessKeyId: 'motoAccessKey', secretAccessKey: 'motoSecretKey', }; default: return {}; // Fallback for other services if needed }
}; module.exports = { S3: new AWS.S3(getAwsConfig('S3')), DynamoDB: new AWS.DynamoDB(getAwsConfig('DynamoDB')), DocumentClient: new AWS.DynamoDB.DocumentClient(getAwsConfig('DynamoDB')), SQS: new AWS.SQS(getAwsConfig('SQS')), SNS: new AWS.SNS(getAwsConfig('SNS')), Lambda: new AWS.Lambda(getAwsConfig('Lambda')),
};
// src/awsConfig.js
const AWS = require('aws-sdk'); const isLocal = process.env.NODE_ENV === 'development' || process.env.IS_OFFLINE; const getAwsConfig = (serviceName) => { if (!isLocal) { return {}; // Use default AWS configuration (production) } // Configuration for local development switch (serviceName) { case 'S3': return { endpoint: 'http://localhost:9000', // MinIO endpoint s3ForcePathStyle: true, // Required for MinIO accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin', region: 'us-east-1', // Can be any region for local }; case 'DynamoDB': return { endpoint: 'http://localhost:8000', // DynamoDB Local endpoint region: 'us-east-1', // Can be any region for local accessKeyId: 'fakeAccessKey', // Not used by DynamoDB Local, but SDK needs it secretAccessKey: 'fakeSecretKey', // Same here }; case 'SQS': case 'SNS': case 'Lambda': return { endpoint: 'http://localhost:5000', // Moto endpoint region: 'us-east-1', accessKeyId: 'motoAccessKey', secretAccessKey: 'motoSecretKey', }; default: return {}; // Fallback for other services if needed }
}; module.exports = { S3: new AWS.S3(getAwsConfig('S3')), DynamoDB: new AWS.DynamoDB(getAwsConfig('DynamoDB')), DocumentClient: new AWS.DynamoDB.DocumentClient(getAwsConfig('DynamoDB')), SQS: new AWS.SQS(getAwsConfig('SQS')), SNS: new AWS.SNS(getAwsConfig('SNS')), Lambda: new AWS.Lambda(getAwsConfig('Lambda')),
};
// src/awsConfig.js
const AWS = require('aws-sdk'); const isLocal = process.env.NODE_ENV === 'development' || process.env.IS_OFFLINE; const getAwsConfig = (serviceName) => { if (!isLocal) { return {}; // Use default AWS configuration (production) } // Configuration for local development switch (serviceName) { case 'S3': return { endpoint: 'http://localhost:9000', // MinIO endpoint s3ForcePathStyle: true, // Required for MinIO accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin', region: 'us-east-1', // Can be any region for local }; case 'DynamoDB': return { endpoint: 'http://localhost:8000', // DynamoDB Local endpoint region: 'us-east-1', // Can be any region for local accessKeyId: 'fakeAccessKey', // Not used by DynamoDB Local, but SDK needs it secretAccessKey: 'fakeSecretKey', // Same here }; case 'SQS': case 'SNS': case 'Lambda': return { endpoint: 'http://localhost:5000', // Moto endpoint region: 'us-east-1', accessKeyId: 'motoAccessKey', secretAccessKey: 'motoSecretKey', }; default: return {}; // Fallback for other services if needed }
}; module.exports = { S3: new AWS.S3(getAwsConfig('S3')), DynamoDB: new AWS.DynamoDB(getAwsConfig('DynamoDB')), DocumentClient: new AWS.DynamoDB.DocumentClient(getAwsConfig('DynamoDB')), SQS: new AWS.SQS(getAwsConfig('SQS')), SNS: new AWS.SNS(getAwsConfig('SNS')), Lambda: new AWS.Lambda(getAwsConfig('Lambda')),
};
plugins:
  - serverless-offline

custom:
  serverless-offline:
    # serverless-offline sets IS_OFFLINE=true in your handlers, which
    # awsConfig.js uses to switch to the local endpoints.
    port: 3000
    host: "0.0.0.0"
    disableCookieValidation: true
plugins:
  - serverless-offline

custom:
  serverless-offline:
    # serverless-offline sets IS_OFFLINE=true in your handlers, which
    # awsConfig.js uses to switch to the local endpoints.
    port: 3000
    host: "0.0.0.0"
    disableCookieValidation: true
plugins:
  - serverless-offline

custom:
  serverless-offline:
    # serverless-offline sets IS_OFFLINE=true in your handlers, which
    # awsConfig.js uses to switch to the local endpoints.
    port: 3000
    host: "0.0.0.0"
    disableCookieValidation: true

- Account Requirement: Even for what used to be considered the LocalStack free tier, certain features now prompt you to sign in or get an API key. This disrupts automated workflows and forces registration.
- Archived GitHub Repository: The localstack/localstack GitHub repo is now archived. This sends a clear signal: the primary channel for community contribution and rapid open-source fixes is gone. You're now expected to mostly use their official channels, which often means their paid offerings. - Truly open source, active development.
- Excellent for Python applications and testing.
- Can run standalone or within a WSGI container.
- Supports many services (S3, DynamoDB, SQS, SNS, Lambda, etc.). - Not ideal for non-Python applications without proxying.
- Doesn't run actual AWS service binaries; it's a mock.
- No easy Docker image provided by the project itself, but community images exist. - DynamoDB Local: Official, JAR-based. Rock solid. Use this for DynamoDB.
- MinIO: Open-source, S3-compatible object storage. If you just need S3 buckets and objects, MinIO is fantastic. Fast, reliable, and truly S3-API compatible. - Highly stable and reliable.
- True API compatibility (especially DynamoDB Local).
- Independent, so one failing service doesn't take down everything.
- No account required, ever. - Requires managing separate containers or processes.
- You need to know which services you actually need locally. - Serverless Offline: A plugin for the Serverless Framework. It simulates API Gateway and Lambda executions directly on your machine. Great for Node.js, Python, Ruby functions.
- AWS SAM CLI: The official AWS Serverless Application Model CLI. It can run Lambda functions locally in a Docker container, mimicking the real AWS Lambda environment closely. Good for debugging and testing. - Excellent for serverless functions and API Gateway.
- Serverless Offline is super fast for local dev.
- SAM CLI gives high fidelity to actual Lambda runtime. - Only covers Lambda/API Gateway. You'll need other tools for S3, DynamoDB, etc. - Python-centric testing (mocking): Moto
- Persistent data stores (S3, DynamoDB): MinIO + DynamoDB Local
- Lambda/API Gateway development: Serverless Offline / AWS SAM CLI - We're running amazon/dynamodb-local on port 8000. It shares a database instance (-sharedDb) and persists data to ./dynamodbd-data.
- minio/minio runs on port 9000 for the S3 API and 9001 for its console. Data persists to ./minio-data.
- ghcr.io/getmoto/moto:latest is a community-maintained Docker image for Moto, running on port 5000. You define the services it should mock via MOTO_SERVICES. I've included S3, SQS, SNS, and Lambda here, but for S3, MinIO is usually a more robust choice. You can remove s3 from MOTO_SERVICES if you prefer MinIO for S3. - This central awsConfig.js file checks if you're in a local development environment.
- It then returns specific endpoint configurations for each service, pointing them to our Docker containers.
- s3ForcePathStyle: true for S3 (MinIO) is critical. Without it, the AWS SDK tries to use virtual-hosted style URLs (e.g., bucket.localhost:9000), which MinIO might not handle correctly by default depending on your setup. Path-style means localhost:9000/bucket.
- Access keys for DynamoDB Local and Moto can be dummy values, as they don't actually authenticate. MinIO does use its configured credentials. - Thinking the "Free" API Key was a complete solution: The Error: I got weird, intermittent errors like "LocalStack Pro features not available for the current account", even for services that should have been free. Or sometimes, the service would just hang. My docker-compose would spin up, but AWS CLI commands would just time out. The Fix: Realized I was just delaying the inevitable. The "free" tier still felt constrained and unstable for complex CI/CD setups or even just prolonged local dev. It was better to completely decouple. I removed the LOCALSTACK_API_KEY environment variable and focused on truly independent LocalStack alternatives.
- The Error: I got weird, intermittent errors like "LocalStack Pro features not available for the current account", even for services that should have been free. Or sometimes, the service would just hang. My docker-compose would spin up, but AWS CLI commands would just time out.
- The Fix: Realized I was just delaying the inevitable. The "free" tier still felt constrained and unstable for complex CI/CD setups or even just prolonged local dev. It was better to completely decouple. I removed the LOCALSTACK_API_KEY environment variable and focused on truly independent LocalStack alternatives.
- Assuming an archived repo meant a dead project: The Error: When I saw LocalStack GitHub archived, I thought, "Well, that's it, the project is dying." I almost abandoned it completely without checking if there were still viable uses. The Fix: While the main repo for community contributions is gone, the product itself is still developed. It's just a different model. However, for a senior dev, relying on a closed-source core for local emulation felt risky, reinforcing the need for open-source LocalStack alternatives like Moto or standalone services for most projects.
- The Error: When I saw LocalStack GitHub archived, I thought, "Well, that's it, the project is dying." I almost abandoned it completely without checking if there were still viable uses.
- The Fix: While the main repo for community contributions is gone, the product itself is still developed. It's just a different model. However, for a senior dev, relying on a closed-source core for local emulation felt risky, reinforcing the need for open-source LocalStack alternatives like Moto or standalone services for most projects.
- Mixing endpoint configurations haphazardly: The Error: My app would make S3 calls and say UnknownEndpoint: Inaccessible host: 'localhost'. This service may not be available in the 'us-east-1' region. or similar. I'd check the MinIO container, it was running fine. The Fix: The actual issue was typically s3ForcePathStyle: true being missing or misconfigured for MinIO. Or, for DynamoDB, I forgot to provide any accessKeyId/secretAccessKey, even dummy ones, which the SDK sometimes expects for initialization even when not used by the local service. Always double-check your SDK's configuration for specific local endpoints. Every service needs its own explicit endpoint setting.
- The Error: My app would make S3 calls and say UnknownEndpoint: Inaccessible host: 'localhost'. This service may not be available in the 'us-east-1' region. or similar. I'd check the MinIO container, it was running fine.
- The Fix: The actual issue was typically s3ForcePathStyle: true being missing or misconfigured for MinIO. Or, for DynamoDB, I forgot to provide any accessKeyId/secretAccessKey, even dummy ones, which the SDK sometimes expects for initialization even when not used by the local service. Always double-check your SDK's configuration for specific local endpoints. Every service needs its own explicit endpoint setting. - The Error: I got weird, intermittent errors like "LocalStack Pro features not available for the current account", even for services that should have been free. Or sometimes, the service would just hang. My docker-compose would spin up, but AWS CLI commands would just time out.
- The Fix: Realized I was just delaying the inevitable. The "free" tier still felt constrained and unstable for complex CI/CD setups or even just prolonged local dev. It was better to completely decouple. I removed the LOCALSTACK_API_KEY environment variable and focused on truly independent LocalStack alternatives. - The Error: When I saw LocalStack GitHub archived, I thought, "Well, that's it, the project is dying." I almost abandoned it completely without checking if there were still viable uses.
- The Fix: While the main repo for community contributions is gone, the product itself is still developed. It's just a different model. However, for a senior dev, relying on a closed-source core for local emulation felt risky, reinforcing the need for open-source LocalStack alternatives like Moto or standalone services for most projects. - The Error: My app would make S3 calls and say UnknownEndpoint: Inaccessible host: 'localhost'. This service may not be available in the 'us-east-1' region. or similar. I'd check the MinIO container, it was running fine.
- The Fix: The actual issue was typically s3ForcePathStyle: true being missing or misconfigured for MinIO. Or, for DynamoDB, I forgot to provide any accessKeyId/secretAccessKey, even dummy ones, which the SDK sometimes expects for initialization even when not used by the local service. Always double-check your SDK's configuration for specific local endpoints. Every service needs its own explicit endpoint setting. - Port Conflicts: Running multiple services means more ports. Keep your docker-compose.yml clean and use tools like lsof -i :<port> to troubleshoot conflicts.
- Data Persistence: Always mount volumes (like ./minio-data:/data) for your local services. You don't want your data disappearing every time you restart a container. This is crucial for consistent local AWS development without LocalStack data loss.
- Parity with Real AWS: Remember, Moto is a mock, and MinIO/DynamoDB Local are emulators. They are close to AWS, but not 100% identical. Edge cases, specific IAM policies, or advanced features might behave differently. Always test against real AWS for your staging/production builds.
- Test Data Management: When running local services, you'll need scripts to seed initial data for your tests. For DynamoDB Local, you can write simple Node.js scripts to create tables and insert items using the DocumentClient.
- Resource Naming: For services like SQS/SNS, remember that the "region" in your local config often doesn't matter, but having consistent resource names (queue names, topic names) with your real AWS setup helps reduce migration headaches.
- CI/CD Integration: The docker-compose setup fits perfectly into CI/CD pipelines. Just ensure your CI environment has Docker available, and run docker-compose up -d before your tests. This avoids the headaches of LocalStack free tier limitations or API key management in CI.