From c3a967b29468da2398382efaabbc513ce66851c6 Mon Sep 17 00:00:00 2001 From: Sean Shriver Date: Fri, 22 Mar 2024 15:46:33 -0500 Subject: [PATCH] LCDC and LMR updates (#109) * Updating LMR globalflix app * Updating LMR lab guide for 2024 * LCDC: UX improvement for DLQ ARNs, style changes, cleanup process streamlined --- .../change-data-capture/clean-up/index.en.md | 22 +++++++++---------- .../change-data-capture/ex1/create-dlq.en.md | 7 ++++++ .../change-data-capture/ex2/create-dlq.en.md | 7 ++++++ .../ex2/create-lambda.en.md | 2 +- .../ex2/enable-kds-streams.en.md | 12 ++++++---- .../gt_discussion/index.en.md | 10 +++++---- .../module_1/index.en.md | 6 ++--- .../module_3/index.en.md | 2 +- .../summary/index.en.md | 2 +- .../global-serverless/sample-data.json | 2 +- global-serverless/web/globalflix.html | 2 +- global-serverless/web/index.html | 4 ++-- global-serverless/web/player.html | 2 +- 13 files changed, 49 insertions(+), 31 deletions(-) diff --git a/content/change-data-capture/clean-up/index.en.md b/content/change-data-capture/clean-up/index.en.md index 0dda3f44..0af5e7fa 100644 --- a/content/change-data-capture/clean-up/index.en.md +++ b/content/change-data-capture/clean-up/index.en.md @@ -1,5 +1,5 @@ --- -title: "5. Summary & Clean Up" +title: "5. 
Summary and Clean Up" date: 2023-12-01T00:00:00-00:00 weight: 20 chapter: true @@ -16,32 +16,30 @@ If you used your own account, please remove the following resources: * The Lambda Function Event Source Mappings: ```bash -uuid=`aws lambda list-event-source-mappings \ - --function-name create-order-history-kds | jq '.EventSourceMappings[].UUID' --raw-output` -aws lambda delete-event-source-mapping --uuid ${uuid} > output.log -uuid=`aws lambda list-event-source-mappings \ - --function-name create-order-history-ddbs | jq '.EventSourceMappings[].UUID' --raw-output` -aws lambda delete-event-source-mapping --uuid ${uuid} >> output.log +UUID_1=`aws lambda list-event-source-mappings --function-name create-order-history-kds --query 'EventSourceMappings[].UUID' --output text` +UUID_2=`aws lambda list-event-source-mappings --function-name create-order-history-ddbs --query 'EventSourceMappings[].UUID' --output text` +aws lambda delete-event-source-mapping --uuid ${UUID_1} +aws lambda delete-event-source-mapping --uuid ${UUID_2} ``` * The AWS Lambda functions created during the labs: ```bash -aws lambda delete-function --function-name create-order-history-ddbs >> output.log -aws lambda delete-function --function-name create-order-history-kds >> output.log +aws lambda delete-function --function-name create-order-history-ddbs +aws lambda delete-function --function-name create-order-history-kds ``` * The AWS Kinesis data stream created during the labs: ```bash -aws kinesis delete-stream --stream-name Orders >> output.log +aws kinesis delete-stream --stream-name Orders ``` * The Amazon DynamoDB tables created in the Getting Started section of the lab: ```bash -aws dynamodb delete-table --table-name Orders >> output.log -aws dynamodb delete-table --table-name OrdersHistory >> output.log +aws dynamodb delete-table --table-name Orders +aws dynamodb delete-table --table-name OrdersHistory ``` * The Amazon SQS queues created during the labs: diff --git 
a/content/change-data-capture/ex1/create-dlq.en.md b/content/change-data-capture/ex1/create-dlq.en.md index c22a715c..2d6cbce9 100644 --- a/content/change-data-capture/ex1/create-dlq.en.md +++ b/content/change-data-capture/ex1/create-dlq.en.md @@ -20,3 +20,10 @@ Sample output: "QueueUrl": "https://sqs.{aws-region}.amazonaws.com/{aws-account-id}/orders-ddbs-dlq" } ``` + +Later you will need the queue ARN. Use the below command, modifying the queue URL after *--queue-url* to match the result of the previous command, and then save the ARN for later use. + +```bash +aws sqs get-queue-attributes --attribute-names "QueueArn" --query 'Attributes.QueueArn' --output text \ +--queue-url "https://sqs.{aws-region}.amazonaws.com/{aws-account-id}/orders-ddbs-dlq" +``` \ No newline at end of file diff --git a/content/change-data-capture/ex2/create-dlq.en.md b/content/change-data-capture/ex2/create-dlq.en.md index 5642de54..c0db838a 100644 --- a/content/change-data-capture/ex2/create-dlq.en.md +++ b/content/change-data-capture/ex2/create-dlq.en.md @@ -20,3 +20,10 @@ Sample output: "QueueUrl": "https://sqs.{aws-region}.amazonaws.com/{aws-account-id}/orders-kds-dlq" } ``` + +As before you will need the queue ARN. Use the below command, modifying the queue URL after *--queue-url* to match the result of the previous command. + +```bash +aws sqs get-queue-attributes --attribute-names "QueueArn" --query 'Attributes.QueueArn' --output text \ +--queue-url "https://sqs.{aws-region}.amazonaws.com/{aws-account-id}/orders-kds-dlq" +``` \ No newline at end of file diff --git a/content/change-data-capture/ex2/create-lambda.en.md b/content/change-data-capture/ex2/create-lambda.en.md index d957c29c..3d26bea6 100644 --- a/content/change-data-capture/ex2/create-lambda.en.md +++ b/content/change-data-capture/ex2/create-lambda.en.md @@ -11,7 +11,7 @@ Create a lambda function to copy changed records from the Orders DynamoDB stream 2. In the **Functions** section, click on **Create function**. 3. 
Select **Author from scratch**. 4. Set **create-order-history-kds** as the function name. -5. Select a version of **Python** as the runtime. +5. Select **Python 3.11** as the runtime. 6. Expand the **Change default execution role** section. 7. Select **Create a new role from AWS policy templates**. 8. Set **create-order-history-kds-execution-role** as the role name. diff --git a/content/change-data-capture/ex2/enable-kds-streams.en.md b/content/change-data-capture/ex2/enable-kds-streams.en.md index b38bc226..90cbc0fe 100644 --- a/content/change-data-capture/ex2/enable-kds-streams.en.md +++ b/content/change-data-capture/ex2/enable-kds-streams.en.md @@ -38,16 +38,19 @@ Confirm that the stream is active using the following command. ```bash aws kinesis describe-stream \ --stream-name Orders \ - --query "StreamDescription.StreamStatus" + --query "StreamDescription.[StreamStatus, StreamARN]" ``` Sample output: ``` -"ACTIVE" +[ + "ACTIVE", + "arn:aws:kinesis:${REGION}:${ACCOUNT_ID}:stream/Orders" +] ``` -Enable Kinesis streaming for the Orders DynamoDB table using following command. +Enable Kinesis streaming for the Orders DynamoDB table using the following command. Copy the ARN from the previous command into the *--stream-arn* parameter. ```bash aws dynamodb enable-kinesis-streaming-destination \ @@ -61,7 +64,8 @@ Sample output: { "TableName": "Orders", "StreamArn": "arn:aws:kinesis:${REGION}:${ACCOUNT_ID}:stream/Orders", - "DestinationStatus": "ENABLING" + "DestinationStatus": "ENABLING", + "EnableKinesisStreamingConfiguration": {} } ``` diff --git a/content/global-serverless-application/gt_discussion/index.en.md b/content/global-serverless-application/gt_discussion/index.en.md index 131e421d..acd8e9d8 100644 --- a/content/global-serverless-application/gt_discussion/index.en.md +++ b/content/global-serverless-application/gt_discussion/index.en.md @@ -31,11 +31,12 @@ to discuss interesting aspects of Global Tables with a DynamoDB specialist. 
* https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html ## How is read and write throughput managed for Global Tables? * The write capacity must be the same on all table instances across Regions. With GTv2 the write capacity is automatically kept in sync by the GT infrastructure, so a write capacity change to one table replicates to the others. The table must support auto scaling or be in on-demand mode. -* Read capacity is allowed to differ because reads may not be equal across Regions. When adding a global replica to a table the capacity of the source Region is propagated. After creation you can adjust the read capacity, which is not transferred to the other side. +* Read capacity is allowed to differ because reads may not be equal across Regions. When adding a global replica to a table the capacity of the source Region is propagated. After creation you can adjust the read capacity, which is not transferred to the other side. Read capacity can be adjusted for each region's global secondary index as well through [provisioned throughput overrides](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ReplicaGlobalSecondaryIndex.html#DDB-Type-ReplicaGlobalSecondaryIndex-ProvisionedThroughputOverride). ## What Regions does Global Tables support? -* As of today, GTv2 supports 17 Regions. The latest list can be seen in the drop-down on the Console when choosing a Region in which to add a replica. +* As of today, GTv2 supports more than 32 Regions. The latest list can be seen in the drop-down on the Console when choosing a Region in which to add a replica. ## How are GSIs handled with Global Tables? * With GTv2, you create a GSI in one Region, and it’s automatically replicated to the other Region(s) as well as automatically backfilled. +* Write capacity must be the same on each index copy, but you can override the read capacity on a per-region basis. ## How do I delete a global table? 
* You can delete a replica table the same as any other, which will stop replication to that Region and delete the table copy kept in that Region. You cannot however ask to sever the replication and have copies of the table exist as independent entities. * There’s also a rule you can’t delete a source table quickly after it’s used to initiate a new Region. If you try you get the error: “Replica cannot be deleted because it has acted as a source Region for new replica(s) being added to the table in the last 24 hours..” @@ -44,12 +45,13 @@ to discuss interesting aspects of Global Tables with a DynamoDB specialist. There are several ways to avoid conflicts, such as using an IAM policy to only allow writes to the table in one region, routing users to only one region and keeping the other as an idle standby, routing odd users to one region and even users to another region, avoiding the use of non-idempotent updates such as Bookmark = Bookmark + 1 in favor of static updates such as Bookmark=25. +* For more information, review our best practice guide [on request routing with global tables](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-global-table-design.prescriptive-guidance.request-routing.html). ## What are best practices for deploying Global Tables? How can I automate deployment? * In AWS CloudFormation, each global table is controlled by a single stack, in a single Region, regardless of the number of replicas. When you deploy your template, CloudFormation will create/update all replicas as part of a single stack operation. You should not deploy the same `AWS::DynamoDB::GlobalTable` resource in multiple Regions. Doing so will result in errors, and is unsupported. If you deploy your application template in multiple Regions, you can use conditions to only create the resource in a single Region. 
Alternatively, you can choose to define your `AWS::DynamoDB::GlobalTable` resources in a stack separate from your application stack, and make sure it is only deployed to a single Region. * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-globaltable.html * A DynamoDB table is `AWS::DynamoDB::Table` and a global table is `AWS::DynamoDB::GlobalTable`, which essentially makes them two different resources in regards to CFN. One approach then is to create all tables that might ever be global by using the GlobalTable construct, keep them as standalone tables initially, and later add Regions if needed. * If you have a regular table and you want to convert it while using CloudFormation, here is the recipe: Set the deletion policy to retain, remove the table from the stack, convert the table to a Global Table in the console, then import the global table as a new resource to the stack. -* Note that cross-account replication is not supported at this time. +* Note that cross-account replication is not supported at this time (mid-2024). ## How do I monitor Global Tables? * Using Amazon CloudWatch you can observe a metric `ReplicationLatency` which tracks the elapsed time between when an item is written to a replica table and when that item appears in another replica in the global table. It’s expressed in milliseconds and is emitted for every source- and destination-Region pair. * The latencies you will observe depends on many things including the distance between your chosen Regions. It’s common to see latencies in the 0.5 to 2.5 second range for Regions within the same geographic area. @@ -72,4 +74,4 @@ avoiding the use of non-idempotent updates such as Bookmark = Bookmark + 1 in fa ## What’s the best practice for using Global Tables as part of handling a potential Region outage? * Have (or be able to quickly create) independent copies of your execution stack in alternative Regions, each accessing its local DynamoDB endpoint. 
Use Route53 or Global Accelerator to route to the nearest healthy Region, or have the client aware of the multiple endpoints it might use. Use health checks in each Region that will be able to determine reliably if there’s any issue with the stack, including if DynamoDB is degraded. For example, don’t just ping that the DynamoDB endpoint is up, actually do a call that ensures a full successful database flow. Should the health check fail, traffic can route to other Regions (by updating the DNS entry with Route53, by having Global Accelerator route differently, or by having the client choose a different endpoint). Global Tables have a good RPO (recovery point objective) because the data is continuously syncing and a good RTO (recovery time objective) because both Regions always keep a table ready for both read and write traffic. * Note that DynamoDB is a core service on which other services frequently build their control plane operations, thus it’s unlikely you’ll encounter a scenario where DynamoDB has degraded service in a Region while other services are unimpacted. -* A good discussion of health checks can be found at https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-types.html +* Read [Evacuating a Region with global tables](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-global-table-design.prescriptive-guidance.evacuation.html) in our developer docs for more information. diff --git a/content/global-serverless-application/module_1/index.en.md b/content/global-serverless-application/module_1/index.en.md index 2f4fec16..99d4d274 100644 --- a/content/global-serverless-application/module_1/index.en.md +++ b/content/global-serverless-application/module_1/index.en.md @@ -13,9 +13,9 @@ We will use AWS Cloud9 for this event. [AWS Cloud9](https://aws.amazon.com/cloud 1. Choose **Services** at the top of the page, and then choose **Cloud9** under **Developer Tools**. -2. 
There would be an environment ready to use under **Your environments**. +2. There will be an environment ready to use under **My environments**. -3. Click on **Open IDE**, your IDE should open with a welcome note. +3. Click on **Open** under **Cloud9 IDE**, and your IDE should open with a welcome note. You should now see your AWS Cloud9 environment. You need to be familiar with the three areas of the AWS Cloud9 console shown in the following screenshot: @@ -35,7 +35,7 @@ Run ```sudo python3 -m pip install chalice``` to install [AWS Chalice](https://g ::alert[You may see a couple of WARNING lines near the bottom of the command output, these are safely ignored.]{type="info"} -4. Run ```curl -0 https://amazon-dynamodb-labs.com/assets/global-serverless.zip``` +4. Run ```curl -O https://amazon-dynamodb-labs.com/assets/global-serverless.zip``` 5. Run ```unzip global-serverless.zip && cd global-serverless``` 6. To see what application resources we will be deploying you can open the **app.py** file by navigating to "global-serverless/app.py" in the file explorer. This code defines Lambda function and API Gateway routes. diff --git a/content/global-serverless-application/module_3/index.en.md b/content/global-serverless-application/module_3/index.en.md index 88e1b5bc..6c0313ad 100644 --- a/content/global-serverless-application/module_3/index.en.md +++ b/content/global-serverless-application/module_3/index.en.md @@ -37,7 +37,7 @@ Underneath the player you can see a log of each write operation performed, note 3. Return to the AWS console and search for "Lambda" using the search bar at the top 4. A function named "global-serverless-dev" should be listed on the functions page, click the function name. If you do not see it listed check to make sure you are in one of the two regions you deployed to with Chalice on the top right of the page -5. 
Use the "Throttle" button on the top right of the page to set the Lambda functions maximum concurrency to 0, halting any future invocations of the function in this region. +5. Use the **Throttle** button on the top right of the page to set the Lambda functions maximum concurrency to 0, halting any future invocations of the function in this region. ![lambda_throttle](/static/images/global-serverless-application/module_3/lambda_throttle.png) diff --git a/content/global-serverless-application/summary/index.en.md b/content/global-serverless-application/summary/index.en.md index eae6e6e7..836be4fb 100644 --- a/content/global-serverless-application/summary/index.en.md +++ b/content/global-serverless-application/summary/index.en.md @@ -12,7 +12,7 @@ temporary account will be destroyed at the end of the event and all the resources will be deleted, so there is nothing else for you to do. If you are running this workshop in your own AWS account, you may -delete the `global-app` CloudFormation Stack and the Chalice created components to avoid incurring +delete the `DynamoDBID` CloudFormation Stack and the Chalice created components to avoid incurring any charges afterwards. The DynamoDB table was created in On Demand mode so there are no charges for provisioned capacity levels, only for actual usage. However there will be small charges for storage if the table is not cleaned up. 
diff --git a/global-serverless/global-serverless/sample-data.json b/global-serverless/global-serverless/sample-data.json index 3fed1538..ecc3bde8 100644 --- a/global-serverless/global-serverless/sample-data.json +++ b/global-serverless/global-serverless/sample-data.json @@ -262,7 +262,7 @@ "S": "Big Buck Bunny" }, "video": { - "S": "https://commons.wikimedia.org/wiki/File:Big_Buck_Bunny_4K.webm" + "S": "https://upload.wikimedia.org/wikipedia/commons/c/c0/Big_Buck_Bunny_4K.webm" } } } diff --git a/global-serverless/web/globalflix.html b/global-serverless/web/globalflix.html index 438017a7..f707eb82 100644 --- a/global-serverless/web/globalflix.html +++ b/global-serverless/web/globalflix.html @@ -66,7 +66,7 @@

Popular -

© 2022 Globalflix, Inc.

+

© 2024 Globalflix, Inc.

diff --git a/global-serverless/web/index.html b/global-serverless/web/index.html index 21f69423..1202170b 100644 --- a/global-serverless/web/index.html +++ b/global-serverless/web/index.html @@ -43,9 +43,9 @@ - + diff --git a/global-serverless/web/player.html b/global-serverless/web/player.html index fddc004b..77e64f15 100644 --- a/global-serverless/web/player.html +++ b/global-serverless/web/player.html @@ -117,7 +117,7 @@ -

© 2022 Globalflix, Inc.

+

© 2024 Globalflix, Inc.