diff --git a/cfn/cleanup-bucket.sh b/cfn/cleanup-bucket.sh new file mode 100755 index 00000000..38c6d712 --- /dev/null +++ b/cfn/cleanup-bucket.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Empty and delete the shared tutorial S3 bucket, then delete the CloudFormation stack. +# Usage: ./cfn/cleanup-bucket.sh +set -eo pipefail + +STACK_NAME="tutorial-prereqs-bucket" + +BUCKET_NAME=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ + --query 'Stacks[0].Outputs[?OutputKey==`BucketName`].OutputValue' --output text 2>/dev/null) + +if [ -z "$BUCKET_NAME" ] || [ "$BUCKET_NAME" = "None" ]; then + echo "No bucket stack found." + exit 0 +fi + +echo "Bucket: $BUCKET_NAME" +echo "" +echo "Contents:" +aws s3 ls "s3://$BUCKET_NAME/" 2>/dev/null || echo " (empty)" +echo "" +echo "This will permanently delete all objects and the bucket itself." +read -rp "Type the bucket name to confirm: " CONFIRM + +if [ "$CONFIRM" != "$BUCKET_NAME" ]; then + echo "Bucket name does not match. Aborting." + exit 1 +fi + +echo "" +echo "Emptying bucket..." +aws s3 rm "s3://$BUCKET_NAME" --recursive --quiet + +aws s3api list-object-versions --bucket "$BUCKET_NAME" \ + --query '{Objects: Versions[].{Key:Key,VersionId:VersionId}, Quiet: true}' \ + --output json 2>/dev/null | \ + aws s3api delete-objects --bucket "$BUCKET_NAME" --delete file:///dev/stdin > /dev/null 2>&1 || true + +aws s3api list-object-versions --bucket "$BUCKET_NAME" \ + --query '{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}, Quiet: true}' \ + --output json 2>/dev/null | \ + aws s3api delete-objects --bucket "$BUCKET_NAME" --delete file:///dev/stdin > /dev/null 2>&1 || true + +echo "Deleting bucket: $BUCKET_NAME" +aws s3api delete-bucket --bucket "$BUCKET_NAME" + +echo "Deleting stack: $STACK_NAME" +aws cloudformation delete-stack --stack-name "$STACK_NAME" +aws cloudformation wait stack-delete-complete --stack-name "$STACK_NAME" + +echo "Done." 
diff --git a/cfn/prereq-bucket.yaml b/cfn/prereq-bucket.yaml new file mode 100644 index 00000000..065440a0 --- /dev/null +++ b/cfn/prereq-bucket.yaml @@ -0,0 +1,26 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: >- + Shared S3 bucket reference for tutorials. The bucket is created and deleted + by setup-bucket.sh and cleanup-bucket.sh. This stack just exports the name + so other tutorial stacks can import it. + +Parameters: + BucketName: + Type: String + Description: Name of the bucket created by setup-bucket.sh + +Resources: + Placeholder: + Type: AWS::CloudFormation::WaitConditionHandle + +Outputs: + BucketName: + Description: Name of the shared tutorial bucket + Value: !Ref BucketName + Export: + Name: !Sub '${AWS::StackName}-BucketName' + BucketArn: + Description: ARN of the shared tutorial bucket + Value: !Sub 'arn:aws:s3:::${BucketName}' + Export: + Name: !Sub '${AWS::StackName}-BucketArn' diff --git a/cfn/prereq-vpc-private.yaml b/cfn/prereq-vpc-private.yaml new file mode 100644 index 00000000..3a9b88f3 --- /dev/null +++ b/cfn/prereq-vpc-private.yaml @@ -0,0 +1,83 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: >- + Shared VPC with private subnets only across two AZs. + No internet gateway or NAT gateway. Use VPC endpoints for service access. + Deploy once, reference from tutorial stacks via cross-stack exports. 
+ +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.1.0.0/16 + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}' + + PrivateSubnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.1.1.0/24 + AvailabilityZone: !Select [0, !GetAZs ''] + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-private-1' + + PrivateSubnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.1.2.0/24 + AvailabilityZone: !Select [1, !GetAZs ''] + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-private-2' + + PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + + PrivateSubnet1RouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PrivateSubnet1 + RouteTableId: !Ref PrivateRouteTable + + PrivateSubnet2RouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PrivateSubnet2 + RouteTableId: !Ref PrivateRouteTable + + S3Endpoint: + Type: AWS::EC2::VPCEndpoint + Properties: + VpcId: !Ref VPC + ServiceName: !Sub 'com.amazonaws.${AWS::Region}.s3' + RouteTableIds: + - !Ref PrivateRouteTable + +Outputs: + VpcId: + Value: !Ref VPC + Export: + Name: !Sub '${AWS::StackName}-VpcId' + PrivateSubnet1Id: + Value: !Ref PrivateSubnet1 + Export: + Name: !Sub '${AWS::StackName}-PrivateSubnet1' + PrivateSubnet2Id: + Value: !Ref PrivateSubnet2 + Export: + Name: !Sub '${AWS::StackName}-PrivateSubnet2' + PrivateSubnets: + Value: !Join [',', [!Ref PrivateSubnet1, !Ref PrivateSubnet2]] + Export: + Name: !Sub '${AWS::StackName}-PrivateSubnets' + PrivateRouteTableId: + Value: !Ref PrivateRouteTable + Export: + Name: !Sub '${AWS::StackName}-PrivateRouteTable' diff --git a/cfn/prereq-vpc-public.yaml b/cfn/prereq-vpc-public.yaml new file mode 100644 index 00000000..52a350ca --- /dev/null +++ b/cfn/prereq-vpc-public.yaml @@ -0,0 +1,159 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: >- + 
Shared VPC with public and private subnets across two AZs. + Includes internet gateway, NAT gateway, and route tables. + Deploy once, reference from tutorial stacks via cross-stack exports. + +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.0.0.0/16 + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}' + + InternetGateway: + Type: AWS::EC2::InternetGateway + + GatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref VPC + InternetGatewayId: !Ref InternetGateway + + PublicSubnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.1.0/24 + AvailabilityZone: !Select [0, !GetAZs ''] + MapPublicIpOnLaunch: true + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-public-1' + + PublicSubnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.2.0/24 + AvailabilityZone: !Select [1, !GetAZs ''] + MapPublicIpOnLaunch: true + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-public-2' + + PrivateSubnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.3.0/24 + AvailabilityZone: !Select [0, !GetAZs ''] + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-private-1' + + PrivateSubnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.4.0/24 + AvailabilityZone: !Select [1, !GetAZs ''] + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-private-2' + + NatEip: + Type: AWS::EC2::EIP + DependsOn: GatewayAttachment + + NatGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt NatEip.AllocationId + SubnetId: !Ref PublicSubnet1 + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-nat' + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + + PublicRoute: + Type: AWS::EC2::Route + DependsOn: GatewayAttachment + Properties: + RouteTableId: !Ref PublicRouteTable + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: !Ref 
InternetGateway + + PublicSubnet1RouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnet1 + RouteTableId: !Ref PublicRouteTable + + PublicSubnet2RouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnet2 + RouteTableId: !Ref PublicRouteTable + + PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + + PrivateRoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref PrivateRouteTable + DestinationCidrBlock: 0.0.0.0/0 + NatGatewayId: !Ref NatGateway + + PrivateSubnet1RouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PrivateSubnet1 + RouteTableId: !Ref PrivateRouteTable + + PrivateSubnet2RouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PrivateSubnet2 + RouteTableId: !Ref PrivateRouteTable + +Outputs: + VpcId: + Value: !Ref VPC + Export: + Name: !Sub '${AWS::StackName}-VpcId' + PublicSubnet1Id: + Value: !Ref PublicSubnet1 + Export: + Name: !Sub '${AWS::StackName}-PublicSubnet1' + PublicSubnet2Id: + Value: !Ref PublicSubnet2 + Export: + Name: !Sub '${AWS::StackName}-PublicSubnet2' + PrivateSubnet1Id: + Value: !Ref PrivateSubnet1 + Export: + Name: !Sub '${AWS::StackName}-PrivateSubnet1' + PrivateSubnet2Id: + Value: !Ref PrivateSubnet2 + Export: + Name: !Sub '${AWS::StackName}-PrivateSubnet2' + PublicSubnets: + Value: !Join [',', [!Ref PublicSubnet1, !Ref PublicSubnet2]] + Export: + Name: !Sub '${AWS::StackName}-PublicSubnets' + PrivateSubnets: + Value: !Join [',', [!Ref PrivateSubnet1, !Ref PrivateSubnet2]] + Export: + Name: !Sub '${AWS::StackName}-PrivateSubnets' diff --git a/cfn/setup-bucket.sh b/cfn/setup-bucket.sh new file mode 100755 index 00000000..92b87371 --- /dev/null +++ b/cfn/setup-bucket.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Create the shared tutorial S3 bucket and register it with CloudFormation. 
+# Usage: ./cfn/setup-bucket.sh +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +STACK_NAME="tutorial-prereqs-bucket" +ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text) +REGION=$(aws configure get region 2>/dev/null || echo "us-east-1") +BUCKET_NAME="tutorial-bucket-${ACCOUNT_ID}-${REGION}" + +# Check if stack already exists +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ + --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") + +if [ "$STATUS" = "CREATE_COMPLETE" ] || [ "$STATUS" = "UPDATE_COMPLETE" ]; then + EXISTING=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ + --query 'Stacks[0].Outputs[?OutputKey==`BucketName`].OutputValue' --output text) + echo "Bucket already exists: $EXISTING" + exit 0 +fi + +echo "Creating bucket: $BUCKET_NAME" +if [ "$REGION" = "us-east-1" ]; then + aws s3api create-bucket --bucket "$BUCKET_NAME" +else + aws s3api create-bucket --bucket "$BUCKET_NAME" \ + --create-bucket-configuration LocationConstraint="$REGION" +fi + +aws s3api put-bucket-encryption --bucket "$BUCKET_NAME" \ + --server-side-encryption-configuration \ + '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}' + +aws s3api put-public-access-block --bucket "$BUCKET_NAME" \ + --public-access-block-configuration \ + 'BlockPublicAcls=true,BlockPublicPolicy=true,IgnorePublicAcls=true,RestrictPublicBuckets=true' + +echo "Registering bucket with CloudFormation stack: $STACK_NAME" +aws cloudformation deploy \ + --template-file "$SCRIPT_DIR/prereq-bucket.yaml" \ + --stack-name "$STACK_NAME" \ + --parameter-overrides "BucketName=$BUCKET_NAME" + +echo "Done. 
Bucket: $BUCKET_NAME" +echo "Other stacks can import: !ImportValue ${STACK_NAME}-BucketName" diff --git a/cleanup.sh b/cleanup.sh new file mode 100755 index 00000000..4c35a206 --- /dev/null +++ b/cleanup.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# Delete a tutorial's CloudFormation stack and optionally clean up prerequisites. +# Usage: ./cleanup.sh +# ./cleanup.sh --prereqs # delete prerequisite stacks +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PREREQ_STACK="tutorial-prereqs" + +if [ "$1" = "--prereqs" ]; then + echo "=== Prerequisite stacks ===" + for STACK in $(aws cloudformation list-stacks --stack-status-filter CREATE_COMPLETE UPDATE_COMPLETE \ + --query "StackSummaries[?starts_with(StackName, '$PREREQ_STACK')].StackName" --output text 2>/dev/null); do + echo " $STACK" + done + + echo "" + echo "Prerequisite stacks are shared across tutorials." + echo "Only delete them when you're done with all tutorials." + read -rp "Delete all prerequisite stacks? (y/n): " CHOICE + [[ ! "$CHOICE" =~ ^[Yy]$ ]] && exit 0 + + # Handle bucket prereq — must empty first + BUCKET_NAME=$(aws cloudformation describe-stacks --stack-name "$PREREQ_STACK-bucket" \ + --query 'Stacks[0].Outputs[?OutputKey==`BucketName`].OutputValue' --output text 2>/dev/null) + if [ -n "$BUCKET_NAME" ] && [ "$BUCKET_NAME" != "None" ]; then + OBJ_COUNT=$(aws s3api list-objects-v2 --bucket "$BUCKET_NAME" --query 'KeyCount' --output text 2>/dev/null || echo "0") + if [ "$OBJ_COUNT" -gt 0 ] 2>/dev/null; then + echo "" + echo "Bucket $BUCKET_NAME contains $OBJ_COUNT objects." + read -rp "Empty the bucket? (y/n): " EMPTY + if [[ "$EMPTY" =~ ^[Yy]$ ]]; then + echo "Emptying bucket..." 
+          aws s3 rm "s3://$BUCKET_NAME" --recursive --quiet +          aws s3api list-object-versions --bucket "$BUCKET_NAME" \ +            --query '{Objects: Versions[].{Key:Key,VersionId:VersionId}, Quiet: true}' \ +            --output json 2>/dev/null | \ +            aws s3api delete-objects --bucket "$BUCKET_NAME" --delete file:///dev/stdin > /dev/null 2>&1 || true +          # Also remove delete markers (same as cleanup-bucket.sh); otherwise a versioned bucket is never actually empty. +          aws s3api list-object-versions --bucket "$BUCKET_NAME" \ +            --query '{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}, Quiet: true}' \ +            --output json 2>/dev/null | \ +            aws s3api delete-objects --bucket "$BUCKET_NAME" --delete file:///dev/stdin > /dev/null 2>&1 || true +          echo "  Emptied" +        else +          echo "Cannot delete bucket stack while bucket has objects." +          exit 1 +        fi +      fi +      # The bucket is created by setup-bucket.sh outside CloudFormation; the stack is only a name registry, +      # so the bucket must be deleted explicitly or it is leaked. +      echo "Deleting bucket: $BUCKET_NAME" +      aws s3api delete-bucket --bucket "$BUCKET_NAME" 2>/dev/null || true +      aws cloudformation delete-stack --stack-name "$PREREQ_STACK-bucket" +      echo "Deleting $PREREQ_STACK-bucket..." +      aws cloudformation wait stack-delete-complete --stack-name "$PREREQ_STACK-bucket" 2>/dev/null +      echo "  Deleted" +  fi + +  # Handle VPC prereqs — delete cleanly unless tutorial stacks still reference them +  for VPC_TYPE in vpc-public vpc-private; do +    VPC_STACK="$PREREQ_STACK-$VPC_TYPE" +    STATUS=$(aws cloudformation describe-stacks --stack-name "$VPC_STACK" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +    if [ "$STATUS" != "NONE" ] && [ "$STATUS" != "DELETE_COMPLETE" ]; then +      echo "Deleting $VPC_STACK..." +      aws cloudformation delete-stack --stack-name "$VPC_STACK" +      aws cloudformation wait stack-delete-complete --stack-name "$VPC_STACK" 2>/dev/null && echo "  Deleted" || echo "  Failed (other stacks may still import from it)" +    fi +  done +  exit 0 +fi + +# Delete a tutorial stack +TUT_DIR="$1" +[ -z "$TUT_DIR" ] && echo "Usage: $0 <tutorial-dir> | --prereqs" && exit 1 + +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +# Check if stack exists +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ +  --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") + +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then +  echo "Stack $STACK_NAME does not exist." +  echo "" +  echo "Searching for leftover resources tagged with this stack name..." 
+ echo "(Resources that may have been left behind from a failed deletion)" + echo "" + # Search by tag + aws resourcegroupstaggingapi get-resources \ + --tag-filters "Key=tutorial,Values=$STACK_NAME" \ + --query 'ResourceTagMappingList[].{ARN:ResourceARN}' --output table 2>/dev/null || true + # Also search by name prefix + aws resourcegroupstaggingapi get-resources \ + --tag-filters "Key=aws:cloudformation:stack-name,Values=$STACK_NAME" \ + --query 'ResourceTagMappingList[].{ARN:ResourceARN}' --output table 2>/dev/null || true + exit 0 +fi + +echo "Stack: $STACK_NAME (status: $STATUS)" +echo "" +echo "=== Stack Resources ===" +aws cloudformation list-stack-resources --stack-name "$STACK_NAME" \ + --query 'StackResourceSummaries[].{Type:ResourceType,LogicalId:LogicalResourceId,PhysicalId:PhysicalResourceId,Status:ResourceStatus}' --output table + +echo "" +echo "=== Stack Outputs ===" +aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ + --query 'Stacks[0].Outputs[].{Key:OutputKey,Value:OutputValue}' --output table 2>/dev/null || echo " (none)" + +echo "" +read -rp "Delete stack $STACK_NAME? (y/n): " CHOICE +[[ ! "$CHOICE" =~ ^[Yy]$ ]] && exit 0 + +echo "Deleting..." +aws cloudformation delete-stack --stack-name "$STACK_NAME" +aws cloudformation wait stack-delete-complete --stack-name "$STACK_NAME" +echo "Stack $STACK_NAME deleted." + +echo "" +echo "Note: Prerequisite stacks are still running. To delete them:" +echo " $0 --prereqs" diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 00000000..d57159e2 --- /dev/null +++ b/deploy.sh @@ -0,0 +1,143 @@ +#!/bin/bash +# Deploy a tutorial's CloudFormation stack, creating prerequisites if needed. +# Usage: ./deploy.sh [param=value ...] 
+# Example: ./deploy.sh 094-aws-cloudtrail-gs +# ./deploy.sh 026-kinesis-data-streams Runtime=python3.12 +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +CFN_DIR="$SCRIPT_DIR/cfn" +TUTS_DIR="$SCRIPT_DIR/tuts" +PREREQ_STACK="tutorial-prereqs" + +usage() { + echo "Usage: $0 [param=value ...]" + echo "" + echo "Tutorials with CloudFormation templates:" + for dir in "$TUTS_DIR"/*/; do + TEMPLATE=$(find "$dir" -name 'cfn-*.yaml' -o -name 'cfn-*.yml' 2>/dev/null | head -1) + [ -n "$TEMPLATE" ] && echo " $(basename "$dir")" + done + exit 0 +} + +[ $# -lt 1 ] && usage + +TUT_DIR="$1" +shift +OVERRIDES="$@" + +# Find the template +TEMPLATE=$(find "$TUTS_DIR/$TUT_DIR" -name 'cfn-*.yaml' -o -name 'cfn-*.yml' 2>/dev/null | head -1) +if [ -z "$TEMPLATE" ]; then + echo "No CloudFormation template found in tuts/$TUT_DIR/" + echo "Looking for files matching cfn-*.yaml" + exit 1 +fi + +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" +echo "Template: $TEMPLATE" +echo "Stack: $STACK_NAME" + +# Check if the template imports from prerequisite stacks +TEMPLATE_CONTENT=$(cat "$TEMPLATE") +NEEDS_BUCKET=false +NEEDS_VPC=false + +if echo "$TEMPLATE_CONTENT" | grep -q "Fn::ImportValue.*prereqs.*BucketName\|prereq-bucket"; then + NEEDS_BUCKET=true +fi +if echo "$TEMPLATE_CONTENT" | grep -q "Fn::ImportValue.*prereqs-vpc-public\|prereq-vpc-public"; then + NEEDS_VPC=true + VPC_TYPE="public" +fi +if echo "$TEMPLATE_CONTENT" | grep -q "Fn::ImportValue.*prereqs-vpc-private\|prereq-vpc-private"; then + NEEDS_VPC=true + VPC_TYPE="private" +fi + +# Deploy prerequisites if needed +if [ "$NEEDS_BUCKET" = true ]; then + echo "" + echo "This tutorial requires a shared S3 bucket." 
+  BUCKET_STACK=$(aws cloudformation describe-stacks --stack-name "$PREREQ_STACK-bucket" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +  if [ "$BUCKET_STACK" = "NONE" ] || [ "$BUCKET_STACK" = "DELETE_COMPLETE" ]; then +    echo "Prerequisite stack '$PREREQ_STACK-bucket' not found." +    read -rp "Create it now? (y/n): " CHOICE +    if [[ "$CHOICE" =~ ^[Yy]$ ]]; then +      echo "Creating shared bucket..." +      # prereq-bucket.yaml requires a BucketName parameter with no default, and the bucket itself is +      # created outside CloudFormation — so deploying the template directly would fail. setup-bucket.sh +      # creates the bucket and registers it with the stack. +      "$CFN_DIR/setup-bucket.sh" +      echo "Bucket created: $(aws cloudformation describe-stacks --stack-name "$PREREQ_STACK-bucket" --query 'Stacks[0].Outputs[?OutputKey==`BucketName`].OutputValue' --output text)" +    else +      echo "Cannot proceed without the bucket prerequisite." +      exit 1 +    fi +  else +    BUCKET_NAME=$(aws cloudformation describe-stacks --stack-name "$PREREQ_STACK-bucket" --query 'Stacks[0].Outputs[?OutputKey==`BucketName`].OutputValue' --output text) +    echo "Using existing bucket: $BUCKET_NAME" +  fi +fi + +if [ "$NEEDS_VPC" = true ]; then +  echo "" +  VPC_STACK_NAME="$PREREQ_STACK-vpc-$VPC_TYPE" +  echo "This tutorial requires a VPC ($VPC_TYPE subnets)." +  VPC_STACK=$(aws cloudformation describe-stacks --stack-name "$VPC_STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +  if [ "$VPC_STACK" = "NONE" ] || [ "$VPC_STACK" = "DELETE_COMPLETE" ]; then +    echo "Prerequisite stack '$VPC_STACK_NAME' not found." +    read -rp "Create it now? (y/n): " CHOICE +    if [[ "$CHOICE" =~ ^[Yy]$ ]]; then +      echo "Creating VPC ($VPC_TYPE)..." +      aws cloudformation deploy \ +        --template-file "$CFN_DIR/prereq-vpc-$VPC_TYPE.yaml" \ +        --stack-name "$VPC_STACK_NAME" +      echo "VPC created: $(aws cloudformation describe-stacks --stack-name "$VPC_STACK_NAME" --query 'Stacks[0].Outputs[?OutputKey==`VpcId`].OutputValue' --output text)" +    else +      echo "Cannot proceed without the VPC prerequisite." 
+ exit 1 + fi + else + VPC_ID=$(aws cloudformation describe-stacks --stack-name "$VPC_STACK_NAME" --query 'Stacks[0].Outputs[?OutputKey==`VpcId`].OutputValue' --output text) + echo "Using existing VPC: $VPC_ID" + fi +fi + +# Build capabilities argument +CAPA_ARG="" +if echo "$TEMPLATE_CONTENT" | grep -qE "AWS::IAM::Role|AWS::IAM::Policy|AWS::IAM::InstanceProfile"; then + CAPA_ARG="--capabilities CAPABILITY_IAM" +fi +if echo "$TEMPLATE_CONTENT" | grep -q "RoleName\|PolicyName"; then + CAPA_ARG="--capabilities CAPABILITY_NAMED_IAM" +fi + +# Build overrides argument +OVERRIDES_ARG="" +if [ -n "$OVERRIDES" ]; then + OVERRIDES_ARG="--parameter-overrides $OVERRIDES" +fi + +# Deploy +echo "" +echo "Deploying stack: $STACK_NAME" +aws cloudformation deploy \ + --template-file "$TEMPLATE" \ + --stack-name "$STACK_NAME" \ + $CAPA_ARG \ + $OVERRIDES_ARG \ + --no-fail-on-empty-changeset + +echo "" +echo "=== Stack Resources ===" +aws cloudformation list-stack-resources --stack-name "$STACK_NAME" \ + --query 'StackResourceSummaries[].{Type:ResourceType,LogicalId:LogicalResourceId,PhysicalId:PhysicalResourceId,Status:ResourceStatus}' --output table 2>/dev/null || echo " (none)" + +echo "" +echo "=== Stack Outputs ===" +aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ + --query 'Stacks[0].Outputs[].{Key:OutputKey,Value:OutputValue}' --output table 2>/dev/null || echo " (none)" + +echo "" +echo "To delete: ./cleanup.sh $TUT_DIR" diff --git a/test-cfn.py b/test-cfn.py new file mode 100755 index 00000000..06b2378e --- /dev/null +++ b/test-cfn.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python +"""Test all CloudFormation templates: validate, deploy, verify, delete. 
+ +Usage: python3 test-cfn.py [--parallel N] [--skip-deploy] [--region REGION] +""" + +import argparse +import boto3 +import json +import os +import sys +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from dataclasses import dataclass, field +from pathlib import Path + +PREREQ_STACKS = { + "tutorial-prereqs-bucket": "cfn/prereq-bucket.yaml", + "tutorial-prereqs-vpc-public": "cfn/prereq-vpc-public.yaml", + "tutorial-prereqs-vpc-private": "cfn/prereq-vpc-private.yaml", +} + +@dataclass +class TestResult: + template: str + validate: str = "SKIP" + deploy: str = "SKIP" + delete: str = "SKIP" + duration: float = 0 + error: str = "" + stack_name: str = "" + +def find_templates(repo_root): + """Find all cfn-*.yaml templates in tutorial directories.""" + templates = {} + for p in sorted(Path(repo_root, "tuts").glob("*/cfn-*.yaml")): + tut = p.parent.name + templates[tut] = str(p) + return templates + +def detect_prereqs(template_path): + """Check which prerequisite stacks a template needs.""" + content = Path(template_path).read_text() + needed = [] + if "prereqs-bucket" in content or "prereq-bucket" in content: + needed.append("tutorial-prereqs-bucket") + if "prereqs-vpc-public" in content or "prereq-vpc-public" in content: + needed.append("tutorial-prereqs-vpc-public") + if "prereqs-vpc-private" in content or "prereq-vpc-private" in content: + needed.append("tutorial-prereqs-vpc-private") + return needed + +def needs_iam(template_path): + content = Path(template_path).read_text() + if "RoleName" in content or "PolicyName" in content: + return "CAPABILITY_NAMED_IAM" + if "AWS::IAM::" in content: + return "CAPABILITY_IAM" + return None + +def validate_template(cfn, template_path): + body = Path(template_path).read_text() + cfn.validate_template(TemplateBody=body) + +def deploy_stack(cfn, stack_name, template_path, timeout=600): + body = Path(template_path).read_text() + caps = [] + cap = needs_iam(template_path) + if cap: + caps = [cap] + + 
try: + cfn.create_stack( + StackName=stack_name, + TemplateBody=body, + Capabilities=caps, + Tags=[{"Key": "test-run", "Value": "cfn-test"}], + TimeoutInMinutes=10, + OnFailure="DELETE", + ) + except cfn.exceptions.AlreadyExistsException: + cfn.delete_stack(StackName=stack_name) + waiter = cfn.get_waiter("stack_delete_complete") + waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 10, "MaxAttempts": 60}) + cfn.create_stack( + StackName=stack_name, + TemplateBody=body, + Capabilities=caps, + Tags=[{"Key": "test-run", "Value": "cfn-test"}], + TimeoutInMinutes=10, + OnFailure="DELETE", + ) + + waiter = cfn.get_waiter("stack_create_complete") + waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 15, "MaxAttempts": int(timeout / 15)}) + +def delete_stack(cfn, stack_name, timeout=300): + cfn.delete_stack(StackName=stack_name) + waiter = cfn.get_waiter("stack_delete_complete") + waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 10, "MaxAttempts": int(timeout / 10)}) + +def ensure_prereqs(cfn, repo_root, needed_stacks): + """Deploy prerequisite stacks if they don't exist. 
Returns set of failed prereqs.""" + failed = set() + for stack_name in needed_stacks: + try: + resp = cfn.describe_stacks(StackName=stack_name) + status = resp["Stacks"][0]["StackStatus"] + if status in ("CREATE_COMPLETE", "UPDATE_COMPLETE"): + print(f" Prereq {stack_name}: exists ({status})") + continue + elif "ROLLBACK" in status or "FAILED" in status: + print(f" Prereq {stack_name}: cleaning up failed stack...") + cfn.delete_stack(StackName=stack_name) + cfn.get_waiter("stack_delete_complete").wait( + StackName=stack_name, WaiterConfig={"Delay": 10, "MaxAttempts": 30}) + except cfn.exceptions.ClientError: + pass + + template_file = PREREQ_STACKS.get(stack_name) + if not template_file: + failed.add(stack_name) + continue + + template_path = os.path.join(repo_root, template_file) + print(f" Prereq {stack_name}: deploying...") + try: + caps = [] + if needs_iam(template_path): + caps = [needs_iam(template_path)] + body = Path(template_path).read_text() + cfn.create_stack( + StackName=stack_name, TemplateBody=body, Capabilities=caps, + Tags=[{"Key": "test-run", "Value": "cfn-test"}], + TimeoutInMinutes=10, OnFailure="DELETE", + ) + waiter = cfn.get_waiter("stack_create_complete") + waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 15, "MaxAttempts": 40}) + print(f" Prereq {stack_name}: ready") + except Exception as e: + print(f" Prereq {stack_name}: CFN deploy failed ({e})") + print(f" Prereq {stack_name}: Run ./cfn/setup-bucket.sh first, then retry.") + failed.add(stack_name) + try: + cfn.delete_stack(StackName=stack_name) + except Exception: + pass + return failed + +def test_template(cfn, tut_name, template_path, skip_deploy): + """Test a single template: validate, deploy, delete.""" + result = TestResult(template=tut_name) + stack_name = f"cfn-test-{tut_name[:40]}" + result.stack_name = stack_name + start = time.time() + + # Validate + try: + validate_template(cfn, template_path) + result.validate = "PASS" + except Exception as e: + result.validate = 
"FAIL" + result.error = str(e)[:200] + result.duration = time.time() - start + return result + + if skip_deploy: + result.duration = time.time() - start + return result + + # Deploy + try: + deploy_stack(cfn, stack_name, template_path) + result.deploy = "PASS" + except Exception as e: + result.deploy = "FAIL" + result.error = str(e)[:200] + result.duration = time.time() - start + # Try cleanup + try: + delete_stack(cfn, stack_name) + except Exception: + pass + return result + + # Delete + try: + delete_stack(cfn, stack_name) + result.delete = "PASS" + except Exception as e: + result.delete = "FAIL" + result.error = f"Delete failed: {str(e)[:150]}" + + result.duration = time.time() - start + return result + +def main(): + parser = argparse.ArgumentParser(description="Test CloudFormation templates") + parser.add_argument("--parallel", type=int, default=3, help="Max parallel deployments") + parser.add_argument("--skip-deploy", action="store_true", help="Validate only, don't deploy") + parser.add_argument("--region", default="us-east-1") + parser.add_argument("--repo", default=".", help="Repo root directory") + args = parser.parse_args() + + repo_root = os.path.abspath(args.repo) + cfn = boto3.client("cloudformation", region_name=args.region) + + # Find templates + templates = find_templates(repo_root) + print(f"Found {len(templates)} templates") + + if not args.skip_deploy: + # Collect all needed prereqs + all_prereqs = set() + template_prereqs = {} + for tut, path in templates.items(): + prereqs = detect_prereqs(path) + template_prereqs[tut] = prereqs + all_prereqs.update(prereqs) + + failed_prereqs = set() + if all_prereqs: + print(f"\nDeploying prerequisites: {', '.join(sorted(all_prereqs))}") + failed_prereqs = ensure_prereqs(cfn, repo_root, sorted(all_prereqs)) + if failed_prereqs: + print(f"\nFailed prereqs: {', '.join(failed_prereqs)}") + + # Test templates in parallel + print(f"\nTesting {len(templates)} templates (parallel={args.parallel})...\n") + results = 
[] + + with ThreadPoolExecutor(max_workers=args.parallel) as pool: + futures = {} + for tut, path in templates.items(): + # Skip if prereqs failed + if not args.skip_deploy: + missing = set(template_prereqs.get(tut, [])) & failed_prereqs + if missing: + r = TestResult(template=tut, validate="PASS", deploy="SKIP", error=f"Prereq failed: {', '.join(missing)}") + results.append(r) + print(f" ⊘ {tut}: skipped (prereq failed)") + continue + # Each thread gets its own client + thread_cfn = boto3.client("cloudformation", region_name=args.region) + future = pool.submit(test_template, thread_cfn, tut, path, args.skip_deploy) + futures[future] = tut + + for future in as_completed(futures): + tut = futures[future] + result = future.result() + results.append(result) + status = "✓" if result.deploy in ("PASS", "SKIP") and result.validate == "PASS" else "✗" + print(f" {status} {result.template}: validate={result.validate} deploy={result.deploy} delete={result.delete} ({result.duration:.0f}s)") + if result.error: + print(f" Error: {result.error}") + + # Report + results.sort(key=lambda r: r.template) + passed = sum(1 for r in results if r.validate == "PASS" and r.deploy in ("PASS", "SKIP")) + failed = len(results) - passed + + print(f"\n{'='*70}") + print(f"RESULTS: {passed} passed, {failed} failed, {len(results)} total") + print(f"{'='*70}") + print(f"{'Template':<45} {'Validate':<10} {'Deploy':<10} {'Delete':<10} {'Time':<8}") + print(f"{'-'*45} {'-'*10} {'-'*10} {'-'*10} {'-'*8}") + for r in results: + print(f"{r.template:<45} {r.validate:<10} {r.deploy:<10} {r.delete:<10} {r.duration:<8.0f}s") + + # Cleanup prereqs if all tests passed + if not args.skip_deploy and failed == 0: + print(f"\nAll tests passed. 
Prerequisite stacks left running for reuse.") +        # There is no --cleanup-prereqs flag in this script; cleanup.sh owns prereq deletion. +        print("To delete: ./cleanup.sh --prereqs") + +    return 1 if failed > 0 else 0 + +if __name__ == "__main__": +    sys.exit(main()) diff --git a/tuts/001-lightsail-gs/README.md b/tuts/001-lightsail-gs/README.md index baac69af..269bc8c6 100644 --- a/tuts/001-lightsail-gs/README.md +++ b/tuts/001-lightsail-gs/README.md @@ -12,4 +12,31 @@ The script creates the following AWS resources in order: - Lightsail disk (8 GB block storage disk) - Lightsail instance snapshot (backup of the instance) -The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** Lightsail instance and disk + +### Deploy with CloudFormation + +```bash +./deploy.sh 001-lightsail-gs +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/001-lightsail-gs/lightsail-gs-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 001-lightsail-gs +``` diff --git a/tuts/001-lightsail-gs/cfn-lightsail-gs.yaml b/tuts/001-lightsail-gs/cfn-lightsail-gs.yaml new file mode 100644 index 00000000..73fc3efd --- /dev/null +++ b/tuts/001-lightsail-gs/cfn-lightsail-gs.yaml @@ -0,0 +1,30 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: Lightsail getting started - instance with attached disk. 
+ +Resources: + Instance: + Type: AWS::Lightsail::Instance + Properties: + InstanceName: !Sub '${AWS::StackName}-instance' + BlueprintId: amazon_linux_2023 + BundleId: nano_3_0 + AvailabilityZone: !Select [0, !GetAZs ''] + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + + Disk: + Type: AWS::Lightsail::Disk + Properties: + DiskName: !Sub '${AWS::StackName}-disk' + SizeInGb: 8 + AvailabilityZone: !Select [0, !GetAZs ''] + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + +Outputs: + InstanceName: + Value: !Ref Instance + DiskName: + Value: !Ref Disk diff --git a/tuts/001-lightsail-gs/lightsail-gs-cfn.sh b/tuts/001-lightsail-gs/lightsail-gs-cfn.sh new file mode 100755 index 00000000..8ee580e3 --- /dev/null +++ b/tuts/001-lightsail-gs/lightsail-gs-cfn.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +TUT_DIR="$(basename "$SCRIPT_DIR")" +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; } + +run_cmd() { + echo "" + echo "$ $@" + eval "$@" +} + +# Check if stack exists, offer to create +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then + echo "Stack $STACK_NAME does not exist." + read -rp "Deploy it now? (y/n): " CHOICE + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + "$REPO_ROOT/deploy.sh" "$TUT_DIR" + else + echo "Cannot proceed without the stack. 
Deploy with: ./deploy.sh $TUT_DIR" + exit 1 + fi +fi +echo "Stack: $STACK_NAME ($STATUS)" + +INSTANCE=$(get_output InstanceName) +DISK=$(get_output DiskName) +echo "Instance: $INSTANCE" +echo "Disk: $DISK" + +echo "" +echo "--- Step 1: Get instance state ---" +run_cmd aws lightsail get-instance-state --instance-name "$INSTANCE" + +echo "" +echo "--- Step 2: Get instance details ---" +run_cmd aws lightsail get-instance --instance-name "$INSTANCE" --query "'instance.{name:name,blueprint:blueprintId,bundle:bundleId,state:state.name,ip:publicIpAddress}'" --output table + +echo "" +echo "--- Step 3: Create a snapshot ---" +SNAP_NAME="${INSTANCE}-snapshot" +run_cmd aws lightsail create-instance-snapshot --instance-name "$INSTANCE" --instance-snapshot-name "$SNAP_NAME" + +echo "" +echo "--- Step 4: Delete the snapshot ---" +sleep 10 +run_cmd aws lightsail delete-instance-snapshot --instance-snapshot-name "$SNAP_NAME" + +echo "" +echo "Interactive steps complete." +echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/002-vpc-gs/README.md b/tuts/002-vpc-gs/README.md index c3edb739..c05801dc 100644 --- a/tuts/002-vpc-gs/README.md +++ b/tuts/002-vpc-gs/README.md @@ -25,4 +25,31 @@ The script creates the following AWS resources in order: - EC2 security group (web server security group allowing HTTP/HTTPS) - EC2 security group (database security group allowing MySQL from web servers) -The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. 
+ + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** VPC with public subnet and EC2 instance + +### Deploy with CloudFormation + +```bash +./deploy.sh 002-vpc-gs +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/002-vpc-gs/vpc-gs-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 002-vpc-gs +``` diff --git a/tuts/002-vpc-gs/cfn-vpc-gs.yaml b/tuts/002-vpc-gs/cfn-vpc-gs.yaml new file mode 100644 index 00000000..fdc04ac0 --- /dev/null +++ b/tuts/002-vpc-gs/cfn-vpc-gs.yaml @@ -0,0 +1,88 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: VPC getting started - VPC with public subnet, internet gateway, and EC2 instance. + +Parameters: + LatestAmiId: + Type: AWS::SSM::Parameter::Value + Default: /aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64 + +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.0.0.0/16 + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-vpc' + + InternetGateway: + Type: AWS::EC2::InternetGateway + + GatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref VPC + InternetGatewayId: !Ref InternetGateway + + PublicSubnet: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.1.0/24 + MapPublicIpOnLaunch: true + AvailabilityZone: !Select [0, !GetAZs ''] + + RouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + + PublicRoute: + Type: AWS::EC2::Route + DependsOn: GatewayAttachment + Properties: + RouteTableId: !Ref RouteTable + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: !Ref InternetGateway + + SubnetRouteAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnet + 
RouteTableId: !Ref RouteTable + + SecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Tutorial SSH access + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: 0.0.0.0/0 + + Instance: + Type: AWS::EC2::Instance + Properties: + InstanceType: t2.micro + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + ImageId: !Ref LatestAmiId + SubnetId: !Ref PublicSubnet + SecurityGroupIds: + - !Ref SecurityGroup + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-instance' + +Outputs: + VpcId: + Value: !Ref VPC + InstanceId: + Value: !Ref Instance + PublicIp: + Value: !GetAtt Instance.PublicIp diff --git a/tuts/002-vpc-gs/vpc-gs-cfn.sh b/tuts/002-vpc-gs/vpc-gs-cfn.sh new file mode 100755 index 00000000..226eaf43 --- /dev/null +++ b/tuts/002-vpc-gs/vpc-gs-cfn.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +TUT_DIR="$(basename "$SCRIPT_DIR")" +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; } + +run_cmd() { + echo "" + echo "$ $@" + eval "$@" +} + +# Check if stack exists, offer to create +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then + echo "Stack $STACK_NAME does not exist." + read -rp "Deploy it now? (y/n): " CHOICE + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + "$REPO_ROOT/deploy.sh" "$TUT_DIR" + else + echo "Cannot proceed without the stack. 
Deploy with: ./deploy.sh $TUT_DIR" + exit 1 + fi +fi +echo "Stack: $STACK_NAME ($STATUS)" + +VPC_ID=$(get_output VpcId) +INSTANCE_ID=$(get_output InstanceId) +PUBLIC_IP=$(get_output PublicIp) +echo "VPC: $VPC_ID | Instance: $INSTANCE_ID | IP: $PUBLIC_IP" + +echo "" +echo "--- Step 1: Describe the VPC ---" +run_cmd aws ec2 describe-vpcs --vpc-ids "$VPC_ID" --query "'Vpcs[0].{VpcId:VpcId,CIDR:CidrBlock,State:State}'" --output table + +echo "" +echo "--- Step 2: List subnets ---" +run_cmd aws ec2 describe-subnets --filters "Name=vpc-id,Values=$VPC_ID" --query "'Subnets[].{Id:SubnetId,CIDR:CidrBlock,AZ:AvailabilityZone}'" --output table + +echo "" +echo "--- Step 3: Check instance status ---" +run_cmd aws ec2 describe-instances --instance-ids "$INSTANCE_ID" --query "'Reservations[0].Instances[0].{State:State.Name,Type:InstanceType,IP:PublicIpAddress}'" --output table + +echo "" +echo "Interactive steps complete." +echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/003-s3-gettingstarted/README.md b/tuts/003-s3-gettingstarted/README.md index 54d45a38..0799165f 100644 --- a/tuts/003-s3-gettingstarted/README.md +++ b/tuts/003-s3-gettingstarted/README.md @@ -57,6 +57,33 @@ Free tier eligible. No charges expected for a few objects. --- + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** S3 bucket (uses shared prereq bucket) + +### Deploy with CloudFormation + +```bash +./deploy.sh 003-s3-gettingstarted +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. 
+ +```bash +bash tuts/003-s3-gettingstarted/s3-gettingstarted-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 003-s3-gettingstarted +``` ## Appendix: Generation details | Field | Value | diff --git a/tuts/003-s3-gettingstarted/cfn-s3-gettingstarted.yaml b/tuts/003-s3-gettingstarted/cfn-s3-gettingstarted.yaml new file mode 100644 index 00000000..b1ffb6b2 --- /dev/null +++ b/tuts/003-s3-gettingstarted/cfn-s3-gettingstarted.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: >- + S3 getting started. Uses the shared tutorial bucket from the prereq stack. + Run the CLI script to practice S3 operations on the bucket. + +Parameters: + PrereqStackName: + Type: String + Default: tutorial-prereqs-bucket + +Resources: + BucketReadyCondition: + Type: AWS::CloudFormation::WaitConditionHandle + +Outputs: + BucketName: + Description: Use this bucket with the CLI tutorial + Value: !ImportValue + Fn::Sub: '${PrereqStackName}-BucketName' diff --git a/tuts/003-s3-gettingstarted/s3-gettingstarted-cfn.sh b/tuts/003-s3-gettingstarted/s3-gettingstarted-cfn.sh new file mode 100755 index 00000000..f7dca694 --- /dev/null +++ b/tuts/003-s3-gettingstarted/s3-gettingstarted-cfn.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" +TUT_DIR="$(basename "$SCRIPT_DIR")" +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; } + +run_cmd() { + echo "" + echo "$ $@" + eval "$@" +} + +# Check if stack exists, offer to create +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then + echo "Stack $STACK_NAME does not exist." + read -rp "Deploy it now? (y/n): " CHOICE + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + "$REPO_ROOT/deploy.sh" "$TUT_DIR" + else + echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR" + exit 1 + fi +fi +echo "Stack: $STACK_NAME ($STATUS)" + +BUCKET=$(get_output BucketName) +echo "Bucket: $BUCKET" + +echo "" +echo "--- Step 1: Upload an object ---" +echo "Hello from the S3 tutorial" > /tmp/s3-tut-test.txt +run_cmd aws s3 cp /tmp/s3-tut-test.txt "s3://$BUCKET/tutorial/hello.txt" + +echo "" +echo "--- Step 2: List objects ---" +run_cmd aws s3api list-objects-v2 --bucket "$BUCKET" --prefix tutorial/ --query "'Contents[].{Key:Key,Size:Size}'" --output table + +echo "" +echo "--- Step 3: Download the object ---" +run_cmd aws s3 cp "s3://$BUCKET/tutorial/hello.txt" /tmp/s3-tut-download.txt +echo "Content: $(cat /tmp/s3-tut-download.txt)" + +echo "" +echo "--- Step 4: Copy the object ---" +run_cmd aws s3 cp "s3://$BUCKET/tutorial/hello.txt" "s3://$BUCKET/tutorial/backup/hello.txt" + +echo "" +echo "--- Step 5: List all objects ---" +run_cmd aws s3api list-objects-v2 --bucket "$BUCKET" --prefix tutorial/ --query "'Contents[].Key'" --output table + +echo "" +echo "--- Step 6: Clean up tutorial objects ---" +run_cmd aws s3 rm "s3://$BUCKET/tutorial/" --recursive +rm -f /tmp/s3-tut-test.txt /tmp/s3-tut-download.txt +echo "Bucket remains 
for other tutorials." diff --git a/tuts/004-cloudmap-custom-attributes/README.md b/tuts/004-cloudmap-custom-attributes/README.md index 367f64e5..3f4f5ce5 100644 --- a/tuts/004-cloudmap-custom-attributes/README.md +++ b/tuts/004-cloudmap-custom-attributes/README.md @@ -26,4 +26,31 @@ The script creates the following AWS resources in order: - Service Discovery instance (e) - Service Discovery instance (f) -The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** Cloud Map namespace, DynamoDB table, Lambda function + +### Deploy with CloudFormation + +```bash +./deploy.sh 004-cloudmap-custom-attributes +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. 
+ +```bash +bash tuts/004-cloudmap-custom-attributes/cloudmap-custom-attributes-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 004-cloudmap-custom-attributes +``` diff --git a/tuts/004-cloudmap-custom-attributes/cfn-cloudmap-custom-attributes.yaml b/tuts/004-cloudmap-custom-attributes/cfn-cloudmap-custom-attributes.yaml new file mode 100644 index 00000000..35076c46 --- /dev/null +++ b/tuts/004-cloudmap-custom-attributes/cfn-cloudmap-custom-attributes.yaml @@ -0,0 +1,72 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: Cloud Map custom attributes - namespace, DynamoDB table, and Lambda function. + +Resources: + Namespace: + Type: AWS::ServiceDiscovery::HttpNamespace + Properties: + Name: !Sub '${AWS::StackName}-ns' + + Table: + Type: AWS::DynamoDB::Table + DeletionPolicy: Delete + Properties: + TableName: !Sub '${AWS::StackName}-data' + BillingMode: PAY_PER_REQUEST + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + AttributeDefinitions: + - AttributeName: id + AttributeType: S + KeySchema: + - AttributeName: id + KeyType: HASH + + LambdaRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole + Policies: + - PolicyName: dynamodb-access + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - dynamodb:PutItem + - dynamodb:GetItem + - dynamodb:Query + Resource: !GetAtt Table.Arn + + Function: + Type: AWS::Lambda::Function + Properties: + Runtime: python3.12 + Handler: index.handler + Role: !GetAtt LambdaRole.Arn + Environment: + Variables: + TABLE_NAME: !Ref Table + Code: + ZipFile: | + import os, boto3 + def handler(event, context): + table = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME']) + return {"statusCode": 200} + +Outputs: + NamespaceId: + Value: !GetAtt Namespace.Id + 
TableName: + Value: !Ref Table + FunctionName: + Value: !Ref Function diff --git a/tuts/004-cloudmap-custom-attributes/cloudmap-custom-attributes-cfn.sh b/tuts/004-cloudmap-custom-attributes/cloudmap-custom-attributes-cfn.sh new file mode 100755 index 00000000..4c52f078 --- /dev/null +++ b/tuts/004-cloudmap-custom-attributes/cloudmap-custom-attributes-cfn.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +TUT_DIR="$(basename "$SCRIPT_DIR")" +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; } + +run_cmd() { + echo "" + echo "$ $@" + eval "$@" +} + +# Check if stack exists, offer to create +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then + echo "Stack $STACK_NAME does not exist." + read -rp "Deploy it now? (y/n): " CHOICE + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + "$REPO_ROOT/deploy.sh" "$TUT_DIR" + else + echo "Cannot proceed without the stack. 
Deploy with: ./deploy.sh $TUT_DIR" + exit 1 + fi +fi +echo "Stack: $STACK_NAME ($STATUS)" + +NS_ID=$(get_output NamespaceId) +TABLE=$(get_output TableName) +FUNC=$(get_output FunctionName) +echo "Namespace: $NS_ID | Table: $TABLE | Function: $FUNC" + +echo "" +echo "--- Step 1: List services in namespace ---" +run_cmd aws servicediscovery list-services --filters "Name=NAMESPACE_ID,Values=$NS_ID" --query "'Services[].{Id:Id,Name:Name}'" --output table + +echo "" +echo "--- Step 2: Invoke the Lambda function ---" +run_cmd aws lambda invoke --function-name "$FUNC" --cli-binary-format raw-in-base64-out --payload "'{\"action\":\"test\"}'" /tmp/cfn-resp.json +cat /tmp/cfn-resp.json && rm -f /tmp/cfn-resp.json + +echo "" +echo "--- Step 3: Scan DynamoDB table ---" +run_cmd aws dynamodb scan --table-name "$TABLE" --select COUNT --query "'{Count:Count}'" --output table + +echo "" +echo "Interactive steps complete." +echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/005-cloudfront-gettingstarted/README.md b/tuts/005-cloudfront-gettingstarted/README.md index c79244fb..3ae91a9e 100644 --- a/tuts/005-cloudfront-gettingstarted/README.md +++ b/tuts/005-cloudfront-gettingstarted/README.md @@ -12,4 +12,31 @@ The script creates the following AWS resources in order: - CloudFront distribution - S3 bucket policy -The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. 
+ +**Resources created:** CloudFront distribution with S3 origin + +### Deploy with CloudFormation + +```bash +./deploy.sh 005-cloudfront-gettingstarted +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/005-cloudfront-gettingstarted/cloudfront-gettingstarted-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 005-cloudfront-gettingstarted +``` diff --git a/tuts/005-cloudfront-gettingstarted/cfn-cloudfront-gettingstarted.yaml b/tuts/005-cloudfront-gettingstarted/cfn-cloudfront-gettingstarted.yaml new file mode 100644 index 00000000..2463b022 --- /dev/null +++ b/tuts/005-cloudfront-gettingstarted/cfn-cloudfront-gettingstarted.yaml @@ -0,0 +1,58 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: CloudFront getting started - distribution with S3 origin. + +Resources: + OriginBucket: + Type: AWS::S3::Bucket + DeletionPolicy: Delete + + OAC: + Type: AWS::CloudFront::OriginAccessControl + Properties: + OriginAccessControlConfig: + Name: !Sub '${AWS::StackName}-oac' + OriginAccessControlOriginType: s3 + SigningBehavior: always + SigningProtocol: sigv4 + + Distribution: + Type: AWS::CloudFront::Distribution + Properties: + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + DistributionConfig: + Enabled: true + DefaultRootObject: index.html + Origins: + - Id: S3Origin + DomainName: !GetAtt OriginBucket.RegionalDomainName + OriginAccessControlId: !GetAtt OAC.Id + S3OriginConfig: + OriginAccessIdentity: '' + DefaultCacheBehavior: + TargetOriginId: S3Origin + ViewerProtocolPolicy: redirect-to-https + CachePolicyId: 658327ea-f89d-4fab-a63d-7e88639e58f6 + + BucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: !Ref OriginBucket + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: cloudfront.amazonaws.com + Action: 
s3:GetObject + Resource: !Sub '${OriginBucket.Arn}/*' + Condition: + StringEquals: + AWS:SourceArn: !Sub 'arn:aws:cloudfront::${AWS::AccountId}:distribution/${Distribution}' + +Outputs: + DistributionDomain: + Value: !GetAtt Distribution.DomainName + BucketName: + Value: !Ref OriginBucket diff --git a/tuts/005-cloudfront-gettingstarted/cloudfront-gettingstarted-cfn.sh b/tuts/005-cloudfront-gettingstarted/cloudfront-gettingstarted-cfn.sh new file mode 100755 index 00000000..918ffecc --- /dev/null +++ b/tuts/005-cloudfront-gettingstarted/cloudfront-gettingstarted-cfn.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +TUT_DIR="$(basename "$SCRIPT_DIR")" +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; } + +run_cmd() { + echo "" + echo "$ $@" + eval "$@" +} + +# Check if stack exists, offer to create +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then + echo "Stack $STACK_NAME does not exist." + read -rp "Deploy it now? (y/n): " CHOICE + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + "$REPO_ROOT/deploy.sh" "$TUT_DIR" + else + echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR" + exit 1 + fi +fi +echo "Stack: $STACK_NAME ($STATUS)" + +DOMAIN=$(get_output DistributionDomain) +BUCKET=$(get_output BucketName) +echo "Distribution: $DOMAIN | Bucket: $BUCKET" + +echo "" +echo "--- Step 1: Upload content to origin ---" +echo "

Hello from CloudFront

" > /tmp/cf-index.html +run_cmd aws s3 cp /tmp/cf-index.html "s3://$BUCKET/index.html" --content-type text/html + +echo "" +echo "--- Step 2: Access via CloudFront ---" +echo "URL: https://$DOMAIN/index.html" +run_cmd curl -s --max-time 10 "https://$DOMAIN/index.html" + +echo "" +echo "--- Step 3: Clean up content ---" +run_cmd aws s3 rm "s3://$BUCKET/index.html" +rm -f /tmp/cf-index.html + +echo "" +echo "Interactive steps complete." +echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/008-vpc-private-servers-gs/README.md b/tuts/008-vpc-private-servers-gs/README.md index f78fe9d3..3583b13a 100644 --- a/tuts/008-vpc-private-servers-gs/README.md +++ b/tuts/008-vpc-private-servers-gs/README.md @@ -29,4 +29,31 @@ The script creates the following AWS resources in order: - EC2 nat gateway (b) - EC2 route (b) -The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** VPC with NAT gateway and Auto Scaling group + +### Deploy with CloudFormation + +```bash +./deploy.sh 008-vpc-private-servers-gs +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. 
+ +```bash +bash tuts/008-vpc-private-servers-gs/vpc-private-servers-gs-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 008-vpc-private-servers-gs +``` diff --git a/tuts/008-vpc-private-servers-gs/cfn-vpc-private-servers.yaml b/tuts/008-vpc-private-servers-gs/cfn-vpc-private-servers.yaml new file mode 100644 index 00000000..0d92e5fb --- /dev/null +++ b/tuts/008-vpc-private-servers-gs/cfn-vpc-private-servers.yaml @@ -0,0 +1,116 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: VPC with private servers - NAT gateway, Auto Scaling group in private subnet. + +Parameters: + LatestAmiId: + Type: AWS::SSM::Parameter::Value + Default: /aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64 + +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.0.0.0/16 + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + EnableDnsSupport: true + EnableDnsHostnames: true + + PublicSubnet: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.1.0/24 + MapPublicIpOnLaunch: true + AvailabilityZone: !Select [0, !GetAZs ''] + + PrivateSubnet: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + CidrBlock: 10.0.2.0/24 + AvailabilityZone: !Select [0, !GetAZs ''] + + IGW: + Type: AWS::EC2::InternetGateway + IGWAttach: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref VPC + InternetGatewayId: !Ref IGW + + NatEip: + Type: AWS::EC2::EIP + DependsOn: IGWAttach + NatGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt NatEip.AllocationId + SubnetId: !Ref PublicSubnet + + PublicRT: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + PublicRoute: + Type: AWS::EC2::Route + DependsOn: IGWAttach + Properties: + RouteTableId: !Ref PublicRT + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: !Ref IGW + PublicRTAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnet + RouteTableId: !Ref PublicRT + + PrivateRT: + Type: AWS::EC2::RouteTable + Properties: 
+ VpcId: !Ref VPC + PrivateRoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref PrivateRT + DestinationCidrBlock: 0.0.0.0/0 + NatGatewayId: !Ref NatGateway + PrivateRTAssoc: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PrivateSubnet + RouteTableId: !Ref PrivateRT + + SG: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Private instances + VpcId: !Ref VPC + + LaunchTemplate: + Type: AWS::EC2::LaunchTemplate + Properties: + LaunchTemplateData: + ImageId: !Ref LatestAmiId + InstanceType: t2.micro + SecurityGroupIds: + - !Ref SG + + ASG: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + LaunchTemplate: + LaunchTemplateId: !Ref LaunchTemplate + Version: !GetAtt LaunchTemplate.LatestVersionNumber + MinSize: '1' + MaxSize: '2' + DesiredCapacity: '1' + VPCZoneIdentifier: + - !Ref PrivateSubnet + +Outputs: + VpcId: + Value: !Ref VPC + ASGName: + Value: !Ref ASG diff --git a/tuts/008-vpc-private-servers-gs/vpc-private-servers-gs-cfn.sh b/tuts/008-vpc-private-servers-gs/vpc-private-servers-gs-cfn.sh new file mode 100755 index 00000000..d204ccca --- /dev/null +++ b/tuts/008-vpc-private-servers-gs/vpc-private-servers-gs-cfn.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" +TUT_DIR="$(basename "$SCRIPT_DIR")" +STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')" + +get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; } + +run_cmd() { + echo "" + echo "$ $@" + eval "$@" +} + +# Check if stack exists, offer to create +STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE") +if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then + echo "Stack $STACK_NAME does not exist." + read -rp "Deploy it now? (y/n): " CHOICE + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + "$REPO_ROOT/deploy.sh" "$TUT_DIR" + else + echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR" + exit 1 + fi +fi +echo "Stack: $STACK_NAME ($STATUS)" + +VPC_ID=$(get_output VpcId) +ASG=$(get_output ASGName) +echo "VPC: $VPC_ID | ASG: $ASG" + +echo "" +echo "--- Step 1: Describe Auto Scaling group ---" +run_cmd aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names "$ASG" --query "'AutoScalingGroups[0].{Min:MinSize,Max:MaxSize,Desired:DesiredCapacity,Instances:Instances|length(@)}'" --output table + +echo "" +echo "--- Step 2: Check NAT gateway ---" +run_cmd aws ec2 describe-nat-gateways --filter "Name=vpc-id,Values=$VPC_ID" --query "'NatGateways[0].{Id:NatGatewayId,State:State,IP:NatGatewayAddresses[0].PublicIp}'" --output table + +echo "" +echo "Interactive steps complete." +echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/009-vpc-ipam-gs/README.md b/tuts/009-vpc-ipam-gs/README.md index bdba9590..7ce81c9f 100644 --- a/tuts/009-vpc-ipam-gs/README.md +++ b/tuts/009-vpc-ipam-gs/README.md @@ -14,4 +14,31 @@ The script creates the following AWS resources in order: - EC2 ipam pool (c) - EC2 vpc -The script prompts you to clean up resources when you run it, including if there's an error part way through. 
If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** VPC IPAM with pool and CIDR + +### Deploy with CloudFormation + +```bash +./deploy.sh 009-vpc-ipam-gs +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/009-vpc-ipam-gs/vpc-ipam-gs-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 009-vpc-ipam-gs +``` diff --git a/tuts/009-vpc-ipam-gs/cfn-vpc-ipam.yaml b/tuts/009-vpc-ipam-gs/cfn-vpc-ipam.yaml new file mode 100644 index 00000000..cfd60a06 --- /dev/null +++ b/tuts/009-vpc-ipam-gs/cfn-vpc-ipam.yaml @@ -0,0 +1,31 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: VPC IPAM getting started - IPAM with pool and VPC. 
+
+Resources:
+  IPAM:
+    Type: AWS::EC2::IPAM
+    Properties:
+      Tags:
+        - Key: tutorial
+          Value: !Ref AWS::StackName
+      OperatingRegions:
+        - RegionName: !Ref AWS::Region
+
+  Pool:
+    Type: AWS::EC2::IPAMPool
+    Properties:
+      IpamScopeId: !GetAtt IPAM.PrivateDefaultScopeId
+      AddressFamily: ipv4
+      Locale: !Ref AWS::Region
+
+  PoolCidr:
+    Type: AWS::EC2::IPAMPoolCidr
+    Properties:
+      IpamPoolId: !GetAtt Pool.IpamPoolId
+      Cidr: 10.0.0.0/16
+
+Outputs:
+  IpamId:
+    Value: !GetAtt IPAM.IpamId
+  PoolId:
+    Value: !GetAtt Pool.IpamPoolId
diff --git a/tuts/009-vpc-ipam-gs/vpc-ipam-gs-cfn.sh b/tuts/009-vpc-ipam-gs/vpc-ipam-gs-cfn.sh
new file mode 100755
index 00000000..4cc5ff62
--- /dev/null
+++ b/tuts/009-vpc-ipam-gs/vpc-ipam-gs-cfn.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Run the interactive tutorial steps against resources created by CloudFormation.
+# If the stack does not exist, offers to deploy it first.
+set -eo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
+TUT_DIR="$(basename "$SCRIPT_DIR")"
+STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')"
+
+get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; }
+
+# Print the fully resolved command before running it. "$*" joins args for
+# display (SC2145: "$@" inside a quoted string is undefined); eval re-parses
+# so the quoted --query expressions become single words.
+run_cmd() {
+  echo ""
+  echo "$ $*"
+  eval "$@"
+}
+
+# Check if stack exists, offer to create
+STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE")
+if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then
+  echo "Stack $STACK_NAME does not exist."
+  read -rp "Deploy it now? (y/n): " CHOICE
+  if [[ "$CHOICE" =~ ^[Yy]$ ]]; then
+    "$REPO_ROOT/deploy.sh" "$TUT_DIR"
+  else
+    echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR"
+    exit 1
+  fi
+fi
+echo "Stack: $STACK_NAME ($STATUS)"
+
+IPAM_ID=$(get_output IpamId)
+POOL_ID=$(get_output PoolId)
+echo "IPAM: $IPAM_ID | Pool: $POOL_ID"
+
+echo ""
+echo "--- Step 1: Describe the IPAM ---"
+run_cmd aws ec2 describe-ipams --ipam-ids "$IPAM_ID" --query "'Ipams[0].{Id:IpamId,Region:IpamRegion}'" --output table
+
+echo ""
+echo "--- Step 2: Get pool CIDRs ---"
+run_cmd aws ec2 get-ipam-pool-cidrs --ipam-pool-id "$POOL_ID" --query "'IpamPoolCidrs[].{Cidr:Cidr,State:State}'" --output table
+
+echo ""
+echo "Interactive steps complete."
+echo "To delete stack: ./cleanup.sh $TUT_DIR"
diff --git a/tuts/010-cloudmap-service-discovery/README.md b/tuts/010-cloudmap-service-discovery/README.md
index ace17da1..511c8717 100644
--- a/tuts/010-cloudmap-service-discovery/README.md
+++ b/tuts/010-cloudmap-service-discovery/README.md
@@ -14,4 +14,31 @@ The script creates the following AWS resources in order:
 - Service Discovery instance
 - Service Discovery instance (b)
-The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created.
\ No newline at end of file
+The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created.
+
+
+## CloudFormation
+
+This tutorial includes a CloudFormation template that creates the same resources as the CLI script.
+
+**Resources created:** Cloud Map HTTP namespace and service
+
+### Deploy with CloudFormation
+
+```bash
+./deploy.sh 010-cloudmap-service-discovery
+```
+
+### Run the interactive steps
+
+Once deployed, run the interactive tutorial steps against the CloudFormation-created resources.
Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/010-cloudmap-service-discovery/cloudmap-service-discovery-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 010-cloudmap-service-discovery +``` diff --git a/tuts/010-cloudmap-service-discovery/cfn-cloudmap-service-discovery.yaml b/tuts/010-cloudmap-service-discovery/cfn-cloudmap-service-discovery.yaml new file mode 100644 index 00000000..77e34164 --- /dev/null +++ b/tuts/010-cloudmap-service-discovery/cfn-cloudmap-service-discovery.yaml @@ -0,0 +1,23 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: Cloud Map service discovery - HTTP namespace with service. + +Resources: + Namespace: + Type: AWS::ServiceDiscovery::HttpNamespace + Properties: + Name: !Sub '${AWS::StackName}' + Tags: + - Key: tutorial + Value: !Ref AWS::StackName + + Service: + Type: AWS::ServiceDiscovery::Service + Properties: + Name: web + NamespaceId: !GetAtt Namespace.Id + +Outputs: + NamespaceId: + Value: !GetAtt Namespace.Id + ServiceId: + Value: !GetAtt Service.Id diff --git a/tuts/010-cloudmap-service-discovery/cloudmap-service-discovery-cfn.sh b/tuts/010-cloudmap-service-discovery/cloudmap-service-discovery-cfn.sh new file mode 100755 index 00000000..9adf2877 --- /dev/null +++ b/tuts/010-cloudmap-service-discovery/cloudmap-service-discovery-cfn.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)"
+TUT_DIR="$(basename "$SCRIPT_DIR")"
+STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')"
+
+get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; }
+
+# Print the fully resolved command before running it. "$*" joins args for
+# display (SC2145: "$@" inside a quoted string is undefined); eval re-parses
+# so the quoted --query expressions become single words.
+run_cmd() {
+  echo ""
+  echo "$ $*"
+  eval "$@"
+}
+
+# Check if stack exists, offer to create
+STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE")
+if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then
+  echo "Stack $STACK_NAME does not exist."
+  read -rp "Deploy it now? (y/n): " CHOICE
+  if [[ "$CHOICE" =~ ^[Yy]$ ]]; then
+    "$REPO_ROOT/deploy.sh" "$TUT_DIR"
+  else
+    echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR"
+    exit 1
+  fi
+fi
+echo "Stack: $STACK_NAME ($STATUS)"
+
+NS_ID=$(get_output NamespaceId)
+SVC_ID=$(get_output ServiceId)
+echo "Namespace: $NS_ID | Service: $SVC_ID"
+
+echo ""
+echo "--- Step 1: Register an instance ---"
+INST_ID="i-$(openssl rand -hex 4)"
+run_cmd aws servicediscovery register-instance --service-id "$SVC_ID" --instance-id "$INST_ID" --attributes AWS_INSTANCE_IPV4=10.0.0.1
+
+echo ""
+echo "--- Step 2: List instances ---"
+run_cmd aws servicediscovery list-instances --service-id "$SVC_ID" --query "'Instances[].{Id:Id,IP:Attributes.AWS_INSTANCE_IPV4}'" --output table
+
+echo ""
+echo "--- Step 3: Deregister the instance ---"
+run_cmd aws servicediscovery deregister-instance --service-id "$SVC_ID" --instance-id "$INST_ID"
+
+echo ""
+echo "Interactive steps complete."
+echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/011-getting-started-batch-fargate/README.md b/tuts/011-getting-started-batch-fargate/README.md index 9b6f6dee..650de95d 100644 --- a/tuts/011-getting-started-batch-fargate/README.md +++ b/tuts/011-getting-started-batch-fargate/README.md @@ -14,4 +14,31 @@ The script creates the following AWS resources in order: - Batch job queue - Batch job definition -The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** Batch compute environment and job queue on Fargate + +### Deploy with CloudFormation + +```bash +./deploy.sh 011-getting-started-batch-fargate +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/011-getting-started-batch-fargate/batch-fargate-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 011-getting-started-batch-fargate +``` diff --git a/tuts/011-getting-started-batch-fargate/batch-fargate-cfn.sh b/tuts/011-getting-started-batch-fargate/batch-fargate-cfn.sh new file mode 100755 index 00000000..2a273067 --- /dev/null +++ b/tuts/011-getting-started-batch-fargate/batch-fargate-cfn.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. 
+# If the stack does not exist, offers to deploy it first.
+set -eo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
+TUT_DIR="$(basename "$SCRIPT_DIR")"
+STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')"
+
+get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; }
+
+# Print the fully resolved command before running it. "$*" joins args for
+# display (SC2145: "$@" inside a quoted string is undefined); eval re-parses
+# so the quoted --query expressions become single words.
+run_cmd() {
+  echo ""
+  echo "$ $*"
+  eval "$@"
+}
+
+# Check if stack exists, offer to create
+STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE")
+if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then
+  echo "Stack $STACK_NAME does not exist."
+  read -rp "Deploy it now? (y/n): " CHOICE
+  if [[ "$CHOICE" =~ ^[Yy]$ ]]; then
+    "$REPO_ROOT/deploy.sh" "$TUT_DIR"
+  else
+    echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR"
+    exit 1
+  fi
+fi
+echo "Stack: $STACK_NAME ($STATUS)"
+
+CE_ARN=$(get_output ComputeEnvironmentArn)
+JQ_ARN=$(get_output JobQueueArn)
+echo "Compute Env: $CE_ARN"
+echo "Job Queue: $JQ_ARN"
+
+echo ""
+echo "--- Step 1: Describe compute environment ---"
+run_cmd aws batch describe-compute-environments --compute-environments "$CE_ARN" --query "'computeEnvironments[0].{name:computeEnvironmentName,state:state,status:status}'" --output table
+
+echo ""
+echo "--- Step 2: Describe job queue ---"
+run_cmd aws batch describe-job-queues --job-queues "$JQ_ARN" --query "'jobQueues[0].{name:jobQueueName,state:state,priority:priority}'" --output table
+
+echo ""
+echo "Interactive steps complete."
+echo "To delete stack: ./cleanup.sh $TUT_DIR" diff --git a/tuts/011-getting-started-batch-fargate/cfn-batch-fargate.yaml b/tuts/011-getting-started-batch-fargate/cfn-batch-fargate.yaml new file mode 100644 index 00000000..d477d321 --- /dev/null +++ b/tuts/011-getting-started-batch-fargate/cfn-batch-fargate.yaml @@ -0,0 +1,49 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: AWS Batch on Fargate - compute environment and job queue. + +Resources: + BatchServiceRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: batch.amazonaws.com + Action: sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole + + ComputeEnvironment: + Type: AWS::Batch::ComputeEnvironment + Properties: + Type: MANAGED + ComputeResources: + Type: FARGATE + MaxvCpus: 4 + Subnets: + - !Select [0, !Split [',', !ImportValue tutorial-prereqs-vpc-public-PrivateSubnets]] + SecurityGroupIds: + - !Ref BatchSecurityGroup + ServiceRole: !GetAtt BatchServiceRole.Arn + + BatchSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Batch Fargate compute environment + VpcId: !ImportValue tutorial-prereqs-vpc-public-VpcId + + JobQueue: + Type: AWS::Batch::JobQueue + Properties: + Priority: 1 + ComputeEnvironmentOrder: + - Order: 1 + ComputeEnvironment: !Ref ComputeEnvironment + +Outputs: + ComputeEnvironmentArn: + Value: !Ref ComputeEnvironment + JobQueueArn: + Value: !Ref JobQueue diff --git a/tuts/012-transitgateway-gettingstarted/README.md b/tuts/012-transitgateway-gettingstarted/README.md index 98abdaf3..be3b1f26 100644 --- a/tuts/012-transitgateway-gettingstarted/README.md +++ b/tuts/012-transitgateway-gettingstarted/README.md @@ -20,4 +20,31 @@ The script creates the following AWS resources in order: - EC2 route - EC2 route (b) -The script prompts you to clean up resources when you run it, including if there's an error part way through. 
If you need to clean up resources later, you can use the script log as a reference point for which resources were created. \ No newline at end of file +The script prompts you to clean up resources when you run it, including if there's an error part way through. If you need to clean up resources later, you can use the script log as a reference point for which resources were created. + + +## CloudFormation + +This tutorial includes a CloudFormation template that creates the same resources as the CLI script. + +**Resources created:** Transit gateway with two VPCs + +### Deploy with CloudFormation + +```bash +./deploy.sh 012-transitgateway-gettingstarted +``` + +### Run the interactive steps + +Once deployed, run the interactive tutorial steps against the CloudFormation-created resources. Each command is displayed with resolved values so you can run them individually. + +```bash +bash tuts/012-transitgateway-gettingstarted/transitgateway-gettingstarted-cfn.sh +``` + +### Clean up + +```bash +./cleanup.sh 012-transitgateway-gettingstarted +``` diff --git a/tuts/012-transitgateway-gettingstarted/cfn-transitgateway.yaml b/tuts/012-transitgateway-gettingstarted/cfn-transitgateway.yaml new file mode 100644 index 00000000..dccacf5c --- /dev/null +++ b/tuts/012-transitgateway-gettingstarted/cfn-transitgateway.yaml @@ -0,0 +1,71 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: Transit Gateway getting started - TGW with two VPCs. 
+ +Resources: + VPC1: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.0.0.0/16 + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-vpc1' + - Key: tutorial + Value: !Ref AWS::StackName + + VPC2: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.1.0.0/16 + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-vpc2' + - Key: tutorial + Value: !Ref AWS::StackName + + Subnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC1 + CidrBlock: 10.0.1.0/24 + AvailabilityZone: !Select [0, !GetAZs ''] + + Subnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC2 + CidrBlock: 10.1.1.0/24 + AvailabilityZone: !Select [0, !GetAZs ''] + + TGW: + Type: AWS::EC2::TransitGateway + Properties: + Description: Tutorial transit gateway + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-tgw' + - Key: tutorial + Value: !Ref AWS::StackName + + TGWAttach1: + Type: AWS::EC2::TransitGatewayAttachment + Properties: + TransitGatewayId: !Ref TGW + VpcId: !Ref VPC1 + SubnetIds: + - !Ref Subnet1 + + TGWAttach2: + Type: AWS::EC2::TransitGatewayAttachment + Properties: + TransitGatewayId: !Ref TGW + VpcId: !Ref VPC2 + SubnetIds: + - !Ref Subnet2 + +Outputs: + TransitGatewayId: + Value: !Ref TGW + VPC1Id: + Value: !Ref VPC1 + VPC2Id: + Value: !Ref VPC2 diff --git a/tuts/012-transitgateway-gettingstarted/transitgateway-gettingstarted-cfn.sh b/tuts/012-transitgateway-gettingstarted/transitgateway-gettingstarted-cfn.sh new file mode 100755 index 00000000..efd27ed3 --- /dev/null +++ b/tuts/012-transitgateway-gettingstarted/transitgateway-gettingstarted-cfn.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Run the interactive tutorial steps against resources created by CloudFormation. +# If the stack does not exist, offers to deploy it first. +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)"
+TUT_DIR="$(basename "$SCRIPT_DIR")"
+STACK_NAME="tutorial-$(echo "$TUT_DIR" | sed 's/^[0-9]*-//')"
+
+get_output() { aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query "Stacks[0].Outputs[?OutputKey==\`$1\`].OutputValue" --output text 2>/dev/null; }
+
+# Print the fully resolved command before running it. "$*" joins args for
+# display (SC2145: "$@" inside a quoted string is undefined); eval re-parses
+# so the quoted --query expressions become single words.
+run_cmd() {
+  echo ""
+  echo "$ $*"
+  eval "$@"
+}
+
+# Check if stack exists, offer to create
+STATUS=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NONE")
+if [ "$STATUS" = "NONE" ] || [ "$STATUS" = "DELETE_COMPLETE" ]; then
+  echo "Stack $STACK_NAME does not exist."
+  read -rp "Deploy it now? (y/n): " CHOICE
+  if [[ "$CHOICE" =~ ^[Yy]$ ]]; then
+    "$REPO_ROOT/deploy.sh" "$TUT_DIR"
+  else
+    echo "Cannot proceed without the stack. Deploy with: ./deploy.sh $TUT_DIR"
+    exit 1
+  fi
+fi
+echo "Stack: $STACK_NAME ($STATUS)"
+
+TGW_ID=$(get_output TransitGatewayId)
+VPC1=$(get_output VPC1Id)
+VPC2=$(get_output VPC2Id)
+echo "TGW: $TGW_ID | VPC1: $VPC1 | VPC2: $VPC2"
+
+echo ""
+echo "--- Step 1: Describe the transit gateway ---"
+run_cmd aws ec2 describe-transit-gateways --transit-gateway-ids "$TGW_ID" --query "'TransitGateways[0].{Id:TransitGatewayId,State:State}'" --output table
+
+echo ""
+echo "--- Step 2: List attachments ---"
+run_cmd aws ec2 describe-transit-gateway-attachments --filters "Name=transit-gateway-id,Values=$TGW_ID" --query "'TransitGatewayAttachments[].{VPC:ResourceId,State:State}'" --output table
+
+echo ""
+echo "Interactive steps complete."
+echo "To delete stack: ./cleanup.sh $TUT_DIR"