diff --git a/.github/scripts/build-assets.py b/.github/scripts/build-assets.py
index 10d53fad..a77139cf 100644
--- a/.github/scripts/build-assets.py
+++ b/.github/scripts/build-assets.py
@@ -1,8 +1,11 @@
import os
+import re
import sys
+import time
import glob
import shutil
import ntpath
+import tempfile
import subprocess
from pathlib import Path
from zipfile import ZipFile
@@ -69,4 +72,46 @@
workshop_zip.write(python_script, tail)
shutil.move(os.path.join(os.getcwd(), zip_file_name), os.path.join(dest_root, 'assets', zip_file_name))
-exit()
+# Check build
+
+preview_build = os.path.join(pkg_root, 'preview_build')
+shell_out = tempfile.NamedTemporaryFile(mode='w')
+try:
+ proc = subprocess.Popen([preview_build,"-disable-refresh"],
+ stdout=shell_out, stderr=shell_out, cwd=pkg_root)
+except FileNotFoundError as err:
+ proc = subprocess.Popen(["preview_build", "-disable-refresh"],
+ stdout=shell_out, stderr=shell_out, cwd=pkg_root)
+
+
+time.sleep(10)
+proc.kill()
+build_result_error = r'.*(Build complete with [0-9].*)'
+build_result_success = r'.*(Build succeeded.*)'
+status = None
+status_message = None
+count = 0
+with open(shell_out.name) as f:
+ for line in f:
+ if count > 10000:
+ break
+ count += 1
+ if status == None:
+ match_error = re.search(build_result_error, line)
+ match_success = re.search(build_result_success, line)
+ if match_error:
+ status_message = match_error.group(1)
+ status = 1
+ print("Discovered an error in the build process.\n{}".format(status_message))
+ elif match_success:
+ status_message = match_success.group(1)
+ status = 0
+ print("Success. Build result is: \n{}".format(status_message))
+ elif status == 1:
+ err_match = re.search(r'^.*ERR(.*)', line)
+ err_ignore = re.search(r'^.*Error hosting local preview site.*', line)
+ if err_match and err_ignore is None:
+ print("{}".format(err_match.group(1)))
+
+shell_out.close()
+exit(status)
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 2120ca69..04facd03 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -3,47 +3,36 @@ name: PushToProd
permissions:
id-token: write
on:
- workflow_dispatch:
- inputs:
- website:
- description: 'Name of the S3 bucket aka website to publish to'
- required: true
- default: 'amazon-dynamodb-labs.com'
- options:
- - 'test.amazon-dynamodb-labs.com'
- - 'amazon-dynamodb-labs.com'
+ push:
+ branches:
+ - master
jobs:
buildAndDeploy:
runs-on: ubuntu-latest
env:
- STEP_S3_BUCKET: ${{ github.event.inputs.website }}
+ STEP_S3_BUCKET: amazon-dynamodb-labs.com
steps:
- name: Checkout
uses: actions/checkout@v3
with:
submodules: 'recursive'
fetch-depth: '0'
- - name: Setup Hugo
- uses: peaceiris/actions-hugo@v2
- with:
- hugo-version: '0.102.3'
- # extended: true
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- - name: Build Hugo
- run: hugo --buildFuture
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-region: us-east-1
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
+ - name: Pull preview build
+ run: aws s3 sync s3://amazon-dynamodb-labs-static/build/ . && chmod +x preview_build
- name: Build Assets
run: python3 ./.github/scripts/build-assets.py
- name: S3Sync
- run: aws s3 sync public s3://$STEP_S3_BUCKET --delete
+ run: aws s3 sync public/assets/ s3://$STEP_S3_BUCKET/assets/ --delete
- name: SetS3Acl
run: aws s3api put-object-acl --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers --bucket $STEP_S3_BUCKET --key assets/lab.yaml
- name: SetS3Acl
diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml
new file mode 100644
index 00000000..88d8743f
--- /dev/null
+++ b/.github/workflows/pull-request.yml
@@ -0,0 +1,32 @@
+name: ValidatePR
+
+permissions:
+ id-token: write
+on:
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ buildAndVerify:
+ runs-on: ubuntu-latest
+ env:
+ STEP_S3_BUCKET: 'test.amazon-dynamodb-labs.com'
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
+ fetch-depth: '0'
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+ - name: Configure AWS Credentials
+ uses: aws-actions/configure-aws-credentials@v1
+ with:
+ aws-region: us-east-1
+ role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
+ - name: Pull preview build
+ run: aws s3 sync s3://amazon-dynamodb-labs-static/build/ . && chmod +x preview_build
+ - name: Build Assets
+ run: python3 ./.github/scripts/build-assets.py
diff --git a/.gitmodules b/.gitmodules
index e8d81a2d..3604c796 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,4 @@
[submodule "themes/learn"]
path = themes/learn
url = https://github.com/switch180/hugo-theme-learn.git
- branch = aws
+ branch = aws
\ No newline at end of file
diff --git a/README.md b/README.md
index db6cbf70..a4096ff8 100644
--- a/README.md
+++ b/README.md
@@ -1,46 +1,25 @@
# Amazon DynamoDB Labs
+The repo for https://catalog.workshops.aws/dynamodb-labs/en-US, formerly https://amazon-dynamodb-labs.com
-### Setup:
+### Dev:
-#### Install Hugo:
-On a mac:
-
-`brew install hugo`
-
-On Linux:
- - Download from the releases page: https://github.com/gohugoio/hugo/releases/tag/v0.102.3
- - Extract and save the hugo executable to `/usr/local/bin/`
-
-Note: This workshop is built with [hugo v0.102.3](https://github.com/gohugoio/hugo/releases/tag/v0.102.3). Older versions may produce errors due to the aws theme we use.
+#### Local development
+You can make code changes and markdown changes, but in order to test the build you need to be an Amazon employee with access to preview_build to compile the documentation and run the site locally. [Amazon employees click here for instructions](https://tiny.amazon.com/16x21plc5).
#### Clone this repo:
-From wherever you checkout repos:
+We suggest you make a fork. From wherever you are you can checkout the repo:
`git clone git@github.com:aws-samples/amazon-dynamodb-labs.git` (or your fork)
-#### Clone the theme submodule:
-`cd amazon-dynamodb-labs`
-
-`git submodule init; git submodule update`
-
-
-#### Run Hugo locally:
-To run hugo in development:
-`hugo serve -D`
-
-`hugo` will build your content locally and output to `./public/`
-
-
-#### View Hugo locally:
-Visit http://localhost:1313/ to see the site.
-
-#### Making Edits:
-As you save edits to a page, the site will live-reload to show your changes.
+#### Making edits:
+Amazon employees only: Make changes, run preview_build, and check localhost:8080 to see the site locally.
+Everyone else: Make changes and open a pull request, then wait for the automations to run. They will tell you if you have errors in your changes.
-#### Auto deploy:
+#### Pull requests
+Make a pull request with changes. PRs will be automatically checked to make sure their markdown and other files are correct and without error using an automatic GitHub action. With each commit in a PR, the action will run to verify.
-Within minutes of a commit to the master branch, a build and deploy using the default hugo grav learn theme will kick off. You can review your change at the following address.
+#### On merge to master
-https://master.amazon-dynamodb-labs.com
+On merge to master, a GitHub action will deploy the assets to amazon-dynamodb-labs.com and verify the build to ensure the markdown and other files are correctly formatted. From there, a maintainer must manually pull the changes and push to https://catalog.workshops.aws/dynamodb-labs/en-US
## License
This project is licensed under the Apache-2.0 License.
diff --git a/config.toml b/config.toml
deleted file mode 100644
index 31226439..00000000
--- a/config.toml
+++ /dev/null
@@ -1,64 +0,0 @@
-RelativeURLs=true
-CanonifyURLs=true
-languageCode = "en-US"
-defaultContentLanguage = "en"
-
-title = "Amazon DynamoDB Workshop & Labs"
-theme = "learn"
-metaDataFormat = "yaml"
-defaultContentLanguageInSubdir= false
-
-uglyurls = true
-sectionPagesMenu = "main"
-pygmentsCodeFences = true
-pygmentsStyle = "monokai"
-
-
-
-[params]
- editURL = "https://github.com/aws-samples/amazon-dynamodb-labs/blob/master/content/"
- description = "Hands on labs and real world design scenarios for Amazon DynamoDB"
- author = "Sean Shriver"
- disableBreadcrumb = false
- disableNextPrev = false
- showVisitedLinks = true
- themeVariant = "aws"
- disableSearch = false
- disableAssetsBusting = true
- disableLanguageSwitchingButton = true
- disableShortcutsTitle = true
- disableInlineCopyToClipBoard = true
- #AWS custom params
- design_patterns_s3_lab_yaml = "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/C9.yaml"
- lhol_migration_setup_yaml = "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/migration-env-setup.yaml"
- lhol_migration_dms_setup_yaml = "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/migration-dms-setup.yaml"
- event_driven_architecture_lab_yaml = "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/event-driven-cfn.yaml"
- latest_rh_design_pattern_yt = "https://www.youtube.com/watch?v=xfxBhvGpoa0"
- github_contributing_guide = "https://github.com/aws-samples/amazon-dynamodb-labs/blob/master/CONTRIBUTING.md"
- github_issues_link = "https://github.com/aws-samples/amazon-dynamodb-labs/issues"
-
-[outputs]
-home = [ "HTML", "RSS", "JSON"]
-
-[markup.goldmark.renderer]
-unsafe= true
-
-#
-[menu]
-[[menu.shortcuts]]
-name = " Contributing (GitHub)"
-url = "https://github.com/aws-samples/amazon-dynamodb-labs/blob/master/CONTRIBUTING.md"
-weight = 2
-
-[[menu.shortcuts]]
-name = " Authors"
-url = "/authors.html"
-weight = 3
-
-
-
-[Languages]
-[Languages.en]
-title = "Amazon DynamoDB Workshop & Labs"
-weight = 1
-languageName = "English"
diff --git a/content/all-content.en.md b/content/all-content.en.md
deleted file mode 100644
index 13720b0e..00000000
--- a/content/all-content.en.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-hidden: true
-chapter: true
-type: "all-content"
-description: "Placeholder for an experimental single page holding all doc pages, generated during the build process"
----
diff --git a/content/authors.en.md b/content/authors.en.md
index 0aa2aa4c..29da9a05 100644
--- a/content/authors.en.md
+++ b/content/authors.en.md
@@ -1,8 +1,9 @@
---
title: "Contributors to Amazon DynamoDB Labs"
-hidden: true
+hidden: false
chapter: true
description: "Our editors and hall of fame."
+weight: 100
---
@@ -16,12 +17,14 @@ description: "Our editors and hall of fame."
The serverless event driven architecture lab was added in 2023:
-1. Lucas Rettenmeier ([@rettenls](https://github.com/rettenls)) - Workshop creator for re:Invent 2021
-1. Kirill Bogdanov - ([@kirillsc](https://github.com/kirillsc)) - Workshop creator for re:Invent 2021
-1. Sean Shriver - ([@switch180](https://github.com/switch180)) - Presenter of the workshop at re:Invent 2021. Edited and merged the lab to labs.com.
+1. Lucas Rettenmeier ([@rettenls](https://github.com/rettenls)) - Workshop creator for re\:Invent 2021
+1. Kirill Bogdanov - ([@kirillsc](https://github.com/kirillsc)) - Workshop creator for re\:Invent 2021
+1. Sean Shriver - ([@switch180](https://github.com/switch180)) - Presenter of the workshop at re\:Invent 2021. Edited and merged the lab to labs.com.
1. John Terhune - ([@terhunej](https://github.com/terhunej)) - Prepared the lab guide for publshing to labs.com, editing and updating.
+The lab guide was migrated from amazon-dynamodb-labs.com to Workshop Studio in December of 2023:
+1. Sean Shriver - ([@switch180](https://github.com/switch180)) - Refactored every documentation page for the new Workshop Studio proprietary format.
### 2021 editors
@@ -45,6 +48,6 @@ The following individuals put in hours of their time to revamp the guide to make
### Original version
-This lab was built to run on Qwiklabs in 2018. In 2020 it was rewritten and updated to run outside Qwiklabs.
+LADV was built to run on Qwiklabs in 2018. In 2020 it was rewritten and updated to run outside Qwiklabs.
-A special thanks goes to Regis Gimenis ([regisgimenis](https://github.com/regisgimenis)) who is the original designer of the advanced design patterns. He did one of the most difficult tasks - creating a lab from scratch. Remnants of Regis' work are found throughout the Python files of the workshop and the lab guide. Without him, this site would not exist.
+A special thanks goes to Regis Gimenis ([regisgimenis](https://github.com/regisgimenis)) who is the original designer of the advanced design patterns. He did one of the most difficult tasks - creating a lab from scratch. Remnants of Regis' work are found throughout the Python files of the workshop and the lab guide for LADV. Without him, this site would not exist.
diff --git a/content/design-patterns/ex1capacity/Step4.en.md b/content/design-patterns/ex1capacity/Step4.en.md
index 1d5966c6..3c72bc7c 100644
--- a/content/design-patterns/ex1capacity/Step4.en.md
+++ b/content/design-patterns/ex1capacity/Step4.en.md
@@ -9,23 +9,21 @@ To view the Amazon CloudWatch metrics for your table:
1. Navigate to the DynamoDB section of the AWS management console.
2. As shown in the following image, in the navigation pane, choose Tables. Choose the logfile table, and in the right pane, choose the Metrics tab
- ![Open the CloudWatch metrics for the table](/images/awsnewconsole3.png)
+ ![Open the CloudWatch metrics for the table](/static/images/awsnewconsole3.png)
-
+
The CloudWatch metrics will look like what you see in the following image.
-![The Cloud Watch metrics for the base table](/images/tablelogfile-stats.png)
+![The Cloud Watch metrics for the base table](/static/images/tablelogfile-stats.png)
-{{% notice note %}}
-You might not see provisioned capacity data in your read or write capacity graphs, which are displayed as red lines. It takes time for DynamoDB to generate provisioned capacity CloudWatch metrics, especially for new tables.
-{{% /notice %}}
+::alert[You might not see provisioned capacity data in your read or write capacity graphs, which are displayed as red lines. It takes time for DynamoDB to generate provisioned capacity CloudWatch metrics, especially for new tables.]
The CloudWatch metrics will look like what you see in the following image for the global secondary index.
-![The Cloud Watch metrics for the GSI](/images/GSI-logfile-stats.png)
+![The Cloud Watch metrics for the GSI](/static/images/GSI-logfile-stats.png)
**You might be wondering:** Why are there throttling events on the table but not on the global secondary index? The reason is a base table receives the writes immediately and consumes write capacity doing so, whereas a global secondary index's capacity is consumed asynchronously some time after the initial write to the base table succeeds. In order for this system to work inside the DynamoDB service, there is a buffer between a given base DynamoDB table and a global secondary index (GSI). A base table will quickly surface a throttle if capacity is exhausted, whereas only an imbalance over an extended period of time on a GSI will cause the buffer to fill, thereby generating a throttle. In short, a GSI is more forgiving in the case of an imbalanced access pattern.
diff --git a/content/design-patterns/ex1capacity/Step6.en.md b/content/design-patterns/ex1capacity/Step6.en.md
index c116df50..5561f050 100644
--- a/content/design-patterns/ex1capacity/Step6.en.md
+++ b/content/design-patterns/ex1capacity/Step6.en.md
@@ -21,6 +21,4 @@ row: 2000 in 0.8817043304443359
RowCount: 2000, Total seconds: 17.13607406616211
```
-{{% notice note %}}
-With the new capacity, the total load time is lower.
-{{% /notice %}}
+::alert[With the new capacity, the total load time is lower.]
\ No newline at end of file
diff --git a/content/design-patterns/ex1capacity/Step7.en.md b/content/design-patterns/ex1capacity/Step7.en.md
index 4dba04c8..d90e7ebc 100644
--- a/content/design-patterns/ex1capacity/Step7.en.md
+++ b/content/design-patterns/ex1capacity/Step7.en.md
@@ -56,9 +56,7 @@ ProvisionedThroughputExceededException: An error occurred (ProvisionedThroughput
You can pause the operation by typing Ctrl+Z (Ctrl+C if you are Mac user).
-{{% notice note %}}
-This new table has more RCUs (1,000) and WCUs (1,000), but you still got an error and the load time increased.
-{{% /notice %}}
+::alert[This new table has more RCUs (1,000) and WCUs (1,000), but you still got an error and the load time increased.]{type="warning"}
**Topic for discussion:** Can you explain the behavior of the test? An exception named `ProvisionedThroughputExceededException` was returned by DynamoDB with an exception message suggesting the provisioned capacity of the GSI be increased. This is a telling error, and one that needs to be acted upon. In short, if you want 100% of the writes on the DynamoDB base table to be copied into the GSI, then the GSI should be provisioned with 100% (the same amount) of the capacity on the base table, which should be 1,000 WCU in this example. Simply put, the GSI was under-provisioned.
@@ -70,27 +68,23 @@ Open the AWS console, or switch to your browser tab with the AWS console, to vie
The following image shows the write capacity metric for the `logfile_gsi_low` table. Note that the consumed writes (the blue line) were lower than the provisioned writes (red line) for the table during the test. This tells us the base table had sufficient write capacity for the surge of requests.
-{{% notice note %}}
-It may take a few minutes for the provisioned capacity (red line) to show up in the graphs. The provisioned capacity metrics are synthetic and there can be delays of five to ten minutes until they show a change.
-{{% /notice %}}
+::alert[It may take a few minutes for the provisioned capacity (red line) to show up in the graphs. The provisioned capacity metrics are synthetic and there can be delays of five to ten minutes until they show a change.]
-![Write capacity metric for the table](/images/lowgsi-table-wc.png)
+![Write capacity metric for the table](/static/images/lowgsi-table-wc.png)
The following image shows the write capacity metric for the global secondary index. Note that the consumed writes (the blue line) were higher than the provisioned writes (red line) for the global secondary index during the test. This tells us the GSI was woefully under-provisioned for the requests it received.
-![Write capacity metric for the GSI](/images/lowgsi-gsi1-wc.png)
+![Write capacity metric for the GSI](/static/images/lowgsi-gsi1-wc.png)
The following image shows the throttled write requests for the `logfile_gsi_low` table. Note that the table has throttled write requests, even though the base table was provisioned with sufficient WCUs. Each throttled API request on DynamoDB generates one datapoint for the `ThrottledRequests` metric. In this picture, about 20 API requests were throttled by DynamoDB. However, the table has a GSI and we do not yet know if it, or the base table was the source of the throttle. We must continue investigating.
-![Throttled writes for the table](/images/lowgsi-table-throttles.png)
+![Throttled writes for the table](/static/images/lowgsi-table-throttles.png)
To identify the source of these throttled write requests, review the throttled write events metric. If the DynamoDB base table is the throttle source, it will have `WriteThrottleEvents`. However, if the GSI has insufficient write capacity, it will have `WriteThrottleEvents`.
When you review the throttle events for the GSI, you will see the source of our throttles! Only the GSI has 'Throttled write events', which means it is the source of throttling on the table, and the cause of the throttled Batch write requests.
-![Throttled writes for the GSI](/images/lowgsi-gsi1-throttles.png)
-{{% notice note %}}
-It may take some time for the write throttle events to appear on the GSI throttled write events graph. If you don't immediately see metrics, re-run the command above to load data into DynamoDB and let it continue for several minutes so that many throttling events are created.
-{{% /notice %}}
+![Throttled writes for the GSI](/static/images/lowgsi-gsi1-throttles.png)
+::alert[It may take some time for the write throttle events to appear on the GSI throttled write events graph. If you don't immediately see metrics, re-run the command above to load data into DynamoDB and let it continue for several minutes so that many throttling events are created.]{type="warning"}
When a DynamoDB global secondary index's write throttles are sufficient enough to create throttled requests, the behavior is called GSI back pressure. Throttled requests are `ProvisionedThroughputExceededException` errors in the AWS SDKs, generate `ThrottledRequests` metrics in CloudWatch, and appear as 'throttled write requests' on the base table in the AWS console. When GSI back pressure occurs, all writes to the DynamoDB table are rejected until space in the buffer between the DynamoDB base table and GSI opens up. Regardless of whether a new row is destined for a GSI, writes for a time will be rejected on the base table until space is available - DynamoDB does not have time to determine if a row to be written will be in the GSI or not. This is a troubling situation, but it's an unavoidable constraint from DynamoDB because the service cannot create a buffer of unlimited size between your base table and GSI; there must be a limit to the number of items waiting to be copied from the base table into a GSI. In order to be aware of this behavior early, it's important to monitor throttled requests and events on your DynamoDB table and GSI.
diff --git a/content/design-patterns/ex1capacity/_index.en.md b/content/design-patterns/ex1capacity/index.en.md
similarity index 90%
rename from content/design-patterns/ex1capacity/_index.en.md
rename to content/design-patterns/ex1capacity/index.en.md
index 19770cfd..193887e8 100644
--- a/content/design-patterns/ex1capacity/_index.en.md
+++ b/content/design-patterns/ex1capacity/index.en.md
@@ -1,9 +1,8 @@
---
-title: "DynamoDB Capacity Units and Partitioning"
+title: "Exercise 1: DynamoDB Capacity Units and Partitioning"
date: 2019-12-02T10:16:44-08:00
weight: 2
chapter: true
-pre: "Exercise 1: "
description: "Learn about provisioned capacity."
---
diff --git a/content/design-patterns/ex2scan/Step1.en.md b/content/design-patterns/ex2scan/Step1.en.md
index bc994ecd..1179f4ad 100644
--- a/content/design-patterns/ex2scan/Step1.en.md
+++ b/content/design-patterns/ex2scan/Step1.en.md
@@ -20,9 +20,7 @@ The following code block scans the table.
ProjectionExpression='bytessent')
```
-{{% notice info %}}
-You can review the file on your own with `vim ~/workshop/scan_logfile_simple.py`. Type `:q` and hit enter to exit vim.
-{{% /notice %}}
+::alert[You can review the file on your own with `vim ~/workshop/scan_logfile_simple.py`. Type `:q` and hit enter to exit vim.]
Notice that there is a `Limit` parameter set in the `Scan` command. A single `Scan` operation will read up to the maximum number of items set (if using the `Limit` parameter) or a maximum of 1 MB of data, and then apply any filtering to the results by using `FilterExpression`. If the total number of scanned items exceeds the maximum set by the limit parameter or the data set size limit of 1 MB, the scan stops and results are returned to the user as a `LastEvaluatedKey` value. This value can be used in a subsequent operation so that you can pick up where you left off.
diff --git a/content/design-patterns/ex2scan/Step2.en.md b/content/design-patterns/ex2scan/Step2.en.md
index 224773c3..d53acd98 100644
--- a/content/design-patterns/ex2scan/Step2.en.md
+++ b/content/design-patterns/ex2scan/Step2.en.md
@@ -59,9 +59,7 @@ Scanning 1 million rows of the `logfile_scan` table to get the total of bytes se
Total bytessent 6054250 in 8.544446229934692 seconds
```
-{{% notice note %}}
-The execution time using a parallel scan will be shorter than the execution time for a sequential scan. The difference in execution time will be even more exaggerated for larger tables.
-{{% /notice %}}
+::alert[The execution time using a parallel scan will be shorter than the execution time for a sequential scan. The difference in execution time will be even more exaggerated for larger tables.]
#### Summary
diff --git a/content/design-patterns/ex2scan/_index.en.md b/content/design-patterns/ex2scan/index.en.md
similarity index 94%
rename from content/design-patterns/ex2scan/_index.en.md
rename to content/design-patterns/ex2scan/index.en.md
index bbad0a7c..ff73e35c 100644
--- a/content/design-patterns/ex2scan/_index.en.md
+++ b/content/design-patterns/ex2scan/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Sequential and Parallel Table Scans"
+title: "Exercise 2: Sequential and Parallel Table Scans"
date: 2019-12-02T10:17:10-08:00
weight: 3
chapter: true
-pre: "Exercise 2: "
description: "Learn the difference between sequential and parallel scans."
---
@@ -16,4 +15,4 @@ In order to maximize the utilization of table-level provisioning, use a parallel
The following diagram shows how a multithreaded application performs a parallel `Scan` with three application worker threads. The application spawns three threads and each thread issues a `Scan` request, scans its designated segment, retrieving data 1 MB at a time, and returns the data to the main application thread.
-![Parallel Scan](/images/image7.jpg)
+![Parallel Scan](/static/images/image7.jpg)
diff --git a/content/design-patterns/ex3gsisharding/Step2.en.md b/content/design-patterns/ex3gsisharding/Step2.en.md
index f0af2986..d75352b7 100644
--- a/content/design-patterns/ex3gsisharding/Step2.en.md
+++ b/content/design-patterns/ex3gsisharding/Step2.en.md
@@ -57,6 +57,6 @@ Number of records with responsecode 404 is 5500. Query time: 1.190359354019165 s
#### Review
-In this exercise, we used a sharded global secondary index (GSI) to quickly retrieve sorted results, which used composite keys that are covered later in the lab in [Exercise 6]({{< ref "design-patterns/ex6compos" >}}). Use GSI write sharding when you need a scalable sorted index.
+In this exercise, we used a sharded global secondary index (GSI) to quickly retrieve sorted results, which used composite keys that are covered later in the lab in :link[Exercise 6]{href="/design-patterns/ex6compos"}. Use GSI write sharding when you need a scalable sorted index.
The sharded GSI example used a set range of keys from 0 to 9 inclusive, but in your own application you can choose any range. In your application, you can add more shards as the number of items indexed increase. In each shard, the data is sorted on disk by the sort key. This allowed us to retrieve server access logs sorted by status code and the date, e.g. `404#2017-07-21`.
For more information on how to choose the right number of shards, read [Choosing the right number of shards for your large-scale Amazon DynamoDB table](https://aws.amazon.com/blogs/database/choosing-the-right-number-of-shards-for-your-large-scale-amazon-dynamodb-table/) on the AWS Database Blog.
diff --git a/content/design-patterns/ex3gsisharding/_index.en.md b/content/design-patterns/ex3gsisharding/index.en.md
similarity index 77%
rename from content/design-patterns/ex3gsisharding/_index.en.md
rename to content/design-patterns/ex3gsisharding/index.en.md
index 9f18cd39..992d6b26 100644
--- a/content/design-patterns/ex3gsisharding/_index.en.md
+++ b/content/design-patterns/ex3gsisharding/index.en.md
@@ -1,23 +1,22 @@
---
-title: "Global Secondary Index Write Sharding"
+title: "Exercise 3: Global Secondary Index Write Sharding"
date: 2019-12-02T10:17:22-08:00
weight: 4
chapter: true
-pre: "Exercise 3: "
description: "Query a sharded global secondary index to quickly read sorted data by status code and date."
---
The primary key of a DynamoDB table or a global secondary index consists of a partition key and an optional sort key. The way you design the content of those keys is extremely important for the structure and performance of your database. Partition key values determine the logical partitions in which your data is stored. Therefore, it is important to choose a partition key value that uniformly distributes the workload across all partitions in the table or global secondary index. For a discussion on how to choose the right partition key, see our blog titled [Choosing the Right DynamoDB Partition Key](https://aws.amazon.com/blogs/database/choosing-the-right-dynamodb-partition-key/).
-In this exercise, you learn about global secondary index write sharding, which is an effective design pattern to query selectively the items spread across different logical partitions in a very large table. Let's review the server access logs example [from Exercise 1]({{< ref "design-patterns/ex1capacity" >}}), which is based on Apache service access logs. This time, you query the items with response code `4xx`. Note that the items with response code `4xx` are a very small percentage of the total data and do not have an even distribution by response code. For example, the response code `200 OK` has more records than the others, which is as expected for any web application.
+In this exercise, you learn about global secondary index write sharding, which is an effective design pattern to query selectively the items spread across different logical partitions in a very large table. Let's review the server access logs example :link[from Exercise 1]{href="/design-patterns/ex1capacity"}, which is based on Apache service access logs. This time, you query the items with response code `4xx`. Note that the items with response code `4xx` are a very small percentage of the total data and do not have an even distribution by response code. For example, the response code `200 OK` has more records than the others, which is as expected for any web application.
The following chart shows the distribution of the log records by response code for the sample file, `logfile_medium1.csv`.
-![Log records by response code](/images/image8.jpg)
+![Log records by response code](/static/images/image8.jpg)
You will create a write-sharded global secondary index on a table to randomize the writes across multiple logical partition key values. In effect, this increases the write and read throughput of the application. To apply this design pattern, you can create a random number from a fixed set (for example, 1 to 10), and use this number as the logical partition key for a global secondary index. Because you are randomizing the partition key, the writes to the table are spread evenly across all of the partition key values independent of any attribute. This will yield better parallelism and higher overall throughput. Also, if the application needs to query the log records by a specific response code on a specific date, you can create a composite sort key using a combination of the response code and the date.
-In this exercise, you create a global secondary index using random values for the partition key, and the composite key `responsecode#date#hourofday` as the sort key. The `logfile_scan` table that you created and populated during the preparation phase of the workshop already has these two attributes. If you did not complete the setup steps, return to [Setup - Step 6]({{< ref "design-patterns/setup/Step6" >}}) and complete the step. These attributes were created using the following code.
+In this exercise, you create a global secondary index using random values for the partition key, and the composite key `responsecode#date#hourofday` as the sort key. The `logfile_scan` table that you created and populated during the preparation phase of the workshop already has these two attributes. If you did not complete the setup steps, return to :link[Setup - Step 6]{href="/design-patterns/setup/Step6"} and complete the step. These attributes were created using the following code.
```py
SHARDS = 10
diff --git a/content/design-patterns/ex4gsioverload/Step2.en.md b/content/design-patterns/ex4gsioverload/Step2.en.md
index c77f58c4..d2c58594 100644
--- a/content/design-patterns/ex4gsioverload/Step2.en.md
+++ b/content/design-patterns/ex4gsioverload/Step2.en.md
@@ -16,7 +16,7 @@ The sample `employees.csv` record looks like the following:
1000,Nanine Denacamp,Programmer Analyst,Development,San Francisco,CA,1981-09-30,2014-06-01,Senior Database Administrator,2014-01-25
```
-When you ingest this data into the table, you concatenate some of the attributes, such as `city_dept` (example: San Francisco:Development) because you have an access pattern in the query that takes advantage of this concatenated attribute. The `SK` attribute is also a derived attribute. The concatenation is handled in the Python script, which assembles the record and then executes a `put_item()` to write the record to the table.
+When you ingest this data into the table, you concatenate some of the attributes, such as `city_dept` (example: San Francisco\:Development) because you have an access pattern in the query that takes advantage of this concatenated attribute. The `SK` attribute is also a derived attribute. The concatenation is handled in the Python script, which assembles the record and then executes a `put_item()` to write the record to the table.
Output:
@@ -34,11 +34,11 @@ The output confirms that 1000 items have been inserted to the table.
Review the `employees` table in the DynamoDB console (as shown in the following screenshot) by choosing the **employees** table and then choosing the **Items** menu item.
-![Employees table](/images/employeestablenew.png)
+![Employees table](/static/images/employeestablenew.png)
On the same page in the right pane, choose **[Index]** from the dropdown menu and then click **Run**.
-![Searching the GSI](/images/awsconsolescan.png)
+![Searching the GSI](/static/images/awsconsolescan.png)
Now you can see result of "Scan" operation on an overloaded global secondary index. There are many different entity types in the result set: a root, a previous title, and a current title.
-![Global Secondary Index](/images/employees-scan-GSI.png)
+![Global Secondary Index](/static/images/employees-scan-GSI.png)
diff --git a/content/design-patterns/ex4gsioverload/Step3.en.md b/content/design-patterns/ex4gsioverload/Step3.en.md
index 3bbe4153..a85bfa4b 100644
--- a/content/design-patterns/ex4gsioverload/Step3.en.md
+++ b/content/design-patterns/ex4gsioverload/Step3.en.md
@@ -46,7 +46,7 @@ You can try a different US state by changing the last parameter of the command.
python query_employees.py employees state 'TX'
```
-{{%expand "If you want to query other states, click here to open the list of US states with some data in the table" %}}
+::::expand{header="If you want to query other states, click here to open the list of US states with some data in the table"}
| | | | | |
| --- | --- | --- | --- | --- |
@@ -54,7 +54,7 @@ python query_employees.py employees state 'TX'
| MA | MD | MI | NC | NY |
| OR | PA | TN | TX | WA |
-{{% /expand%}}
+::::
@@ -95,7 +95,7 @@ You also can try a different title, as shown in the following python command.
python query_employees.py employees current_title 'IT Support Manager'
```
-{{%expand "If you want to know the list of all the available titles, click here!" %}}
+::::expand{header="If you want to know the list of all the available titles, click here!"}
| | | | | | |
| ---------------------------------------- | ----------------------------- | ---------------------------------- | ---------------------------------- | --------------------------------------- | ------------------------------- |
@@ -112,7 +112,7 @@ python query_employees.py employees current_title 'IT Support Manager'
| Systems Analyst | System Architect | Systems Designer | Systems Software Engineer | Technical Operations Officer | Technical Support Engineer |
| Technical Support Specialist | Technical Specialist | Telecommunications Specialist | Web Administrator | Web Developer | Webmaster |
-{{% /expand%}}
+::::
diff --git a/content/design-patterns/ex4gsioverload/_index.en.md b/content/design-patterns/ex4gsioverload/index.en.md
similarity index 95%
rename from content/design-patterns/ex4gsioverload/_index.en.md
rename to content/design-patterns/ex4gsioverload/index.en.md
index 47a1ee1e..293fa7fa 100644
--- a/content/design-patterns/ex4gsioverload/_index.en.md
+++ b/content/design-patterns/ex4gsioverload/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Global Secondary Index Key Overloading"
+title: "Exercise 4: Global Secondary Index Key Overloading"
date: 2019-12-02T10:17:33-08:00
weight: 5
chapter: true
-pre: "Exercise 4: "
description: "Explore how to maintain the ability to query on many attributes when you have a multi-entity table."
---
@@ -18,4 +17,4 @@ The access patterns required for this scenario are:
The following screenshot shows the design of the `employees table`. The attribute called `PK` has the employee ID, which is prefixed by the letter `e`. The hash sign (#) is used as a separator between the entity type identifier (`e`) and the actual employee ID. The `SK` is an overloaded attribute, and has either current title, previous title, or the keyword `root`, which denotes the primary item for the employee that holds most of their important attributes. The `GSI_1_PK` attribute includes the title or the name of the employee. The re-use of a given global secondary index for multiple entity types such as employees, employee locations, and employee titles lets us simplify our management of the DynamoDB table because we only need to monitor and pay for one global secondary index as opposed to three separate indexes.
-![Sample design for the Employees table using the GSI overloading pattern](/images/employeestablenew.png)
+![Sample design for the Employees table using the GSI overloading pattern](/static/images/employeestablenew.png)
diff --git a/content/design-patterns/ex5sparse/Step1.en.md b/content/design-patterns/ex5sparse/Step1.en.md
index 5c8b642c..7b49f017 100644
--- a/content/design-patterns/ex5sparse/Step1.en.md
+++ b/content/design-patterns/ex5sparse/Step1.en.md
@@ -49,6 +49,4 @@ Wait until the new index is `ACTIVE` before proceeding.
]
```
-{{% notice warning %}}
-Do not continue until the `IndexStatus` is `ACTIVE` on both indexes. Querying the index before it is `ACTIVE` will result in a failed query.
-{{% /notice %}}
+::alert[Do not continue until the `IndexStatus` is `ACTIVE` on both indexes. Querying the index before it is `ACTIVE` will result in a failed query.]{type="warning"}
diff --git a/content/design-patterns/ex5sparse/_index.en.md b/content/design-patterns/ex5sparse/index.en.md
similarity index 91%
rename from content/design-patterns/ex5sparse/_index.en.md
rename to content/design-patterns/ex5sparse/index.en.md
index 9009a0c1..6e7a585e 100644
--- a/content/design-patterns/ex5sparse/_index.en.md
+++ b/content/design-patterns/ex5sparse/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Sparse Global Secondary Indexes"
+title: "Exercise 5: Sparse Global Secondary Indexes"
date: 2019-12-02T10:17:48-08:00
weight: 6
chapter: true
-pre: "Exercise 5: "
description: "Learn how to cut down the resources required for your searches on uncommon attributes."
---
diff --git a/content/design-patterns/ex6compos/Step1.en.md b/content/design-patterns/ex6compos/Step1.en.md
index ba996769..7e42f813 100644
--- a/content/design-patterns/ex6compos/Step1.en.md
+++ b/content/design-patterns/ex6compos/Step1.en.md
@@ -62,6 +62,4 @@ The `KeyConditionExpression` looks like the following.
Key('GSI_3_PK').eq("state#{}".format('TX')) & Key('GSI_3_SK').begins_with('Austin')
```
-{{% notice warning %}}
-Wait until the `IndexStatus` is `ACTIVE` on all indexes before continuing. If you try to query a GSI but it is not finished creating, you will receive an error.
-{{% /notice %}}
+::alert[Wait until the `IndexStatus` is `ACTIVE` on all indexes before continuing. If you try to query a GSI but it is not finished creating, you will receive an error.]{type="warning"}
diff --git a/content/design-patterns/ex6compos/Step2.en.md b/content/design-patterns/ex6compos/Step2.en.md
index b5d72301..90656e37 100644
--- a/content/design-patterns/ex6compos/Step2.en.md
+++ b/content/design-patterns/ex6compos/Step2.en.md
@@ -7,7 +7,7 @@ weight: 2
You can use the new global secondary index to query the table. If you use only the state, the query does not use the sort key attribute. However, if the query has a value for the second parameter, the code uses the `GSI_3_SK` attribute of the global secondary index, which holds the same value as the `city_dept` attribute, to query all the values that begin with the parameter value.
The following screenshot shows using composite key attributes to query by city and department.
-![Using Composite key attributes to query by city and department](/images/employees-GSI3.png)
+![Using Composite key attributes to query by city and department](/static/images/employees-GSI3.png)
We can perform this same query in a Python script. This snippet shows how a script can take two input parameters (shown as value1 and value2) and craft a query against the GSI_3 global secondary index.
diff --git a/content/design-patterns/ex6compos/_index.en.md b/content/design-patterns/ex6compos/index.en.md
similarity index 97%
rename from content/design-patterns/ex6compos/_index.en.md
rename to content/design-patterns/ex6compos/index.en.md
index d9b28088..52e35a6a 100644
--- a/content/design-patterns/ex6compos/_index.en.md
+++ b/content/design-patterns/ex6compos/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Composite Keys"
+title: "Exercise 6: Composite Keys"
date: 2019-12-02T10:17:57-08:00
weight: 7
chapter: true
-pre: "Exercise 6: "
description: "Learn how to combine two attributes into one to take advantage of the DynamoDB sort key."
---
diff --git a/content/design-patterns/ex7adjlists/Step2.en.md b/content/design-patterns/ex7adjlists/Step2.en.md
index fb261892..a1b52ee2 100644
--- a/content/design-patterns/ex7adjlists/Step2.en.md
+++ b/content/design-patterns/ex7adjlists/Step2.en.md
@@ -8,6 +8,6 @@ In the DynamoDB console, open the **InvoiceAndBills** table and choose the **Ite
In the output, choose **PK** to sort the data in reverse. Notice the different entity types in the same table.
-![Adjacency Lists](/images/invoice-bills-GSI1.png)
+![Adjacency Lists](/static/images/invoice-bills-GSI1.png)
In the following steps you will query the table and retrieve different entity types. Optionally consider performing the queries in the AWS console right after you query them with the Python scripts for extra insight.
diff --git a/content/design-patterns/ex7adjlists/_index.en.md b/content/design-patterns/ex7adjlists/index.en.md
similarity index 97%
rename from content/design-patterns/ex7adjlists/_index.en.md
rename to content/design-patterns/ex7adjlists/index.en.md
index 1a9cbdb6..18f50eb7 100644
--- a/content/design-patterns/ex7adjlists/_index.en.md
+++ b/content/design-patterns/ex7adjlists/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Adjacency Lists"
+title: "Exercise 7: Adjacency Lists"
date: 2020-04-07T10:18:07-08:00
weight: 8
chapter: true
-pre: "Exercise 7: "
description: "Learn how to store multiple entity types in one DynamoDB table."
---
diff --git a/content/design-patterns/ex8streams/Step1.en.md b/content/design-patterns/ex8streams/Step1.en.md
index 9914fc11..469fb5a7 100644
--- a/content/design-patterns/ex8streams/Step1.en.md
+++ b/content/design-patterns/ex8streams/Step1.en.md
@@ -4,7 +4,7 @@ date: 2019-12-02T12:34:06-08:00
weight: 1
---
-![DynamoDB stream with Lambda](/images/image6.jpg)
+![DynamoDB stream with Lambda](/static/images/image6.jpg)
Let's create a table named `logfile_replica` to hold the replicated rows. This `create-table` command is based on the command to create the `logfile` table. As a result, it creates a table that can hold the same items as its upstream table.
diff --git a/content/design-patterns/ex8streams/Step5.en.md b/content/design-patterns/ex8streams/Step5.en.md
index 71b48610..8b781c02 100644
--- a/content/design-patterns/ex8streams/Step5.en.md
+++ b/content/design-patterns/ex8streams/Step5.en.md
@@ -13,19 +13,15 @@ aws lambda create-event-source-mapping \
--event-source-arn YOUR_STREAM_ARN_HERE
```
-{{% notice info %}}
-You must copy the full stream label ARN, including the timestamp on the end
-{{% /notice %}}
-
-{{%expand "Click here for an example" %}}
+::alert[You must copy the full stream label ARN, including the timestamp on the end]
+::::expand{header="Click here for an example"}
```bash
aws lambda create-event-source-mapping \
--function-name ddbreplica_lambda --enabled --batch-size 100 --starting-position TRIM_HORIZON \
--event-source-arn arn:aws:dynamodb:::table/logfile/stream/2021-12-31T00:00:00.000
```
-
-{{% /expand%}}
+::::
The following is the expected result.
diff --git a/content/design-patterns/ex8streams/Step6.en.md b/content/design-patterns/ex8streams/Step6.en.md
index 2657aa39..43aa98d9 100644
--- a/content/design-patterns/ex8streams/Step6.en.md
+++ b/content/design-patterns/ex8streams/Step6.en.md
@@ -57,12 +57,10 @@ NEXTTOKEN eyJFeGNsdXNpdmVTdGFydEtleSI6IG51bGwsICJib3RvX3RydW5jYXRlX2Ftb3Vu
If you ran the lab on your own AWS account, you should delete all the tables made during these exercises. If you are at an AWS event using the AWS Workshop platform (Workshop Studio), you do not need to delete your tables.
-{{% notice warning %}}
-During the course of the lab, you created DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. You must delete the DynamoDB tables using the DynamoDB console to clean up the lab. In addition, if you are not part of an AWS event or you are running this lab in your own account, make sure you delete the CloudFormation stack as soon as the lab is complete. If you're using Workshop Studio Event Delivery, you don't need to delete the CloudFormation stack.
-{{% /notice %}}
+::alert[During the course of the lab, you created DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. You must delete the DynamoDB tables using the DynamoDB console to clean up the lab. In addition, if you are not part of an AWS event or you are running this lab in your own account, make sure you delete the CloudFormation stack as soon as the lab is complete. If you're using Workshop Studio Event Delivery, you don't need to delete the CloudFormation stack.]
#### Reporting issues
-Firstly, if you encounter an issue running the lab that needs to be addressed we recommend you fork the code on GitHub and make a pull request with your change. Please review [our contributing guide on GitHub.com]({{% siteparam "github_contributing_guide" %}}).
+Firstly, if you encounter an issue running the lab that needs to be addressed we recommend you fork the code on GitHub and make a pull request with your change. Please review [our contributing guide on GitHub.com](:param{key="github_contributing_guide"}).
-Secondly, if you have a feature request or you are unable to fork the package to make a change yourself please submit [an issue on our GitHub page]({{% siteparam "github_issues_link" %}}).
+Secondly, if you have a feature request or you are unable to fork the package to make a change yourself please submit [an issue on our GitHub page](:param{key="github_issues_link"}).
diff --git a/content/design-patterns/ex8streams/_index.en.md b/content/design-patterns/ex8streams/index.en.md
similarity index 93%
rename from content/design-patterns/ex8streams/_index.en.md
rename to content/design-patterns/ex8streams/index.en.md
index 5734bd98..1788ccf0 100644
--- a/content/design-patterns/ex8streams/_index.en.md
+++ b/content/design-patterns/ex8streams/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Amazon DynamoDB Streams and AWS Lambda"
+title: "Exercise 8: Amazon DynamoDB Streams and AWS Lambda"
date: 2019-12-02T10:18:15-08:00
weight: 9
chapter: true
-pre: "Exercise 8: "
description: "Learn how to process DynamoDB items with AWS Lambda for endless triggers."
---
@@ -17,4 +16,4 @@ DynamoDB Streams captures a time-ordered sequence of item-level modifications in
You will reuse the `logfile` table that you created in Exercise 1. You will enable DynamoDB Streams on the `logfile` table. Whenever a change is made to the `logfile` table, this change appears immediately in a stream. Next, you attach a Lambda function to the stream. The purpose of the Lambda function is to query DynamoDB Streams for updates to the `logfile` table and write the updates to a newly created table named `logfile_replica`. The following diagram shows an overview of this implementation.
-![DynamoDB stream with Lambda](/images/image6.jpg)
+![DynamoDB stream with Lambda](/static/images/image6.jpg)
diff --git a/content/design-patterns/_index.en.md b/content/design-patterns/index.en.md
similarity index 86%
rename from content/design-patterns/_index.en.md
rename to content/design-patterns/index.en.md
index 77005708..369eb922 100644
--- a/content/design-patterns/_index.en.md
+++ b/content/design-patterns/index.en.md
@@ -1,15 +1,16 @@
---
-title: "Advanced Design Patterns for Amazon DynamoDB"
+title: "LADV: Advanced Design Patterns for Amazon DynamoDB"
chapter: true
description: "300 level: Hands-on exercise using Python and DynamoDB best practices."
-pre: "LADV: "
-weight: 1
+weight: 10
---
In this workshop, you review [Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html) design patterns and best practices to build highly scalable applications that are optimized for performance and cost. This workshop implements these design patterns by using Python scripts. At the end of this workshop, you will have the knowledge to build and monitor DynamoDB applications that can grow to any size and scale.
Here's what this workshop includes:
-{{% children depth="1" description="true" %}}
+::children{depth=1}
+
+
### Target audience
@@ -29,4 +30,4 @@ This workshop is designed for developers, engineers, and database administrators
### Recommended study before taking the lab
-If you're not part of an AWS event and you haven't recently reviewed DynamoDB design concepts, we suggest you watch this video on [Advanced Design Patterns for DynamoDB]({{% siteparam "latest_rh_design_pattern_yt" %}}), which is about an hour in duration.
+If you're not part of an AWS event and you haven't recently reviewed DynamoDB design concepts, we suggest you watch this video on [Advanced Design Patterns for DynamoDB](:param{key="latest_rh_design_pattern_yt"}), which is about an hour in duration.
diff --git a/content/design-patterns/setup/Step2.en.md b/content/design-patterns/setup/Step2.en.md
index d22874fa..61fc7345 100644
--- a/content/design-patterns/setup/Step2.en.md
+++ b/content/design-patterns/setup/Step2.en.md
@@ -32,6 +32,4 @@ Sample output:
aws-cli/2.13.26 Python/3.11.6 Linux/6.2.0-1013-aws exe/x86_64.ubuntu.22 prompt/off
```
-{{% notice note %}}
-_Make sure you have AWS CLI version 2.x or higher and python 3.10 or higher before proceeding. If you do not have these versions, you may have difficultly successfully completing the lab._
-{{% /notice %}}
+::alert[_Make sure you have AWS CLI version 2.x or higher and python 3.10 or higher before proceeding. If you do not have these versions, you may have difficulty successfully completing the lab._]
diff --git a/content/design-patterns/setup/Step4.en.md b/content/design-patterns/setup/Step4.en.md
index 9da4387d..fbb821d6 100644
--- a/content/design-patterns/setup/Step4.en.md
+++ b/content/design-patterns/setup/Step4.en.md
@@ -91,6 +91,4 @@ ls -l ./data
- logfile_small1.csv
- logfile_stream.csv
-{{% notice info %}}
-_Note: The code provided is for instructional use only. It should not be used outside of this lab, and it is not fit for production use._
-{{% /notice %}}
+::alert[_Note: The code provided is for instructional use only. It should not be used outside of this lab, and it is not fit for production use._]{type="warning"}
diff --git a/content/design-patterns/setup/Step6.en.md b/content/design-patterns/setup/Step6.en.md
index 958018dd..9c97dec9 100644
--- a/content/design-patterns/setup/Step6.en.md
+++ b/content/design-patterns/setup/Step6.en.md
@@ -4,9 +4,7 @@ date: 2019-12-02T10:20:18-08:00
weight: 60
---
-{{% notice info %}}
-_Reminder: All commands are executed in the shell console connected to the EC2 instance, not your local machine. (If you are not sure you can always validate going back to [step 1](/design-patterns/setup/step1.html))_
-{{% /notice %}}
+::alert[_Reminder: All commands are executed in the shell console connected to the EC2 instance, not your local machine. (If you are not sure you can always validate going back to [step 1](/design-patterns/setup/step1.html))_]
In the upcoming Exercise #2 we will discuss table scan and its best practices. In this step, let's populate the table with 1 million items in preparation for that exercise.
@@ -57,9 +55,7 @@ disown
`nohup` is used to run the process in the background, and `disown` allows the load to continue in case you are disconnected.
-{{% notice note %}}
-_The following command will take about ten minutes to complete. It will run in the background._
-{{% /notice %}}
+::alert[_The following command will take about ten minutes to complete. It will run in the background._]
Run `pgrep -l python` to verify the script is loading data in the background.
@@ -73,9 +69,7 @@ Output:
3257 python
```
-{{% notice note %}}
-_The process id - the 4 digit number in the above example - will be different for everyone._
-{{% /notice %}}
+::alert[_The process id - the 4 digit number in the above example - will be different for everyone._]
The script will continue to run in the background while you work on the next exercise.
diff --git a/content/design-patterns/setup/_index.en.md b/content/design-patterns/setup/_index.en.md
deleted file mode 100644
index 66df911d..00000000
--- a/content/design-patterns/setup/_index.en.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: "Setup"
-date: 2019-12-02T07:05:12-08:00
-weight: 1
-chapter: true
-pre: "Start here: "
-description: "Setup the lab environment and connect to the lab instance on Amazon EC2."
----
-
-## GETTING STARTED
-
-To set up this workshop, choose one of the following links, depending on whether you are:
-- […running the workshop on your own (in your own account)]({{< ref "/design-patterns/setup/user-account" >}}), or
-- […attending an AWS-hosted event (using AWS-provided access codes)]({{< ref "/design-patterns/setup/aws-ws-event" >}}), or
-
-Once you have completed with either setup, continue on to:
-- [Exercise 1: DynamoDB Capacity Units and Partitioning]({{< ref "/design-patterns/ex1capacity" >}})
diff --git a/content/design-patterns/setup/aws-ws-event.md b/content/design-patterns/setup/aws-ws-event.en.md
similarity index 74%
rename from content/design-patterns/setup/aws-ws-event.md
rename to content/design-patterns/setup/aws-ws-event.en.md
index 281c9cfd..1bebb5e6 100644
--- a/content/design-patterns/setup/aws-ws-event.md
+++ b/content/design-patterns/setup/aws-ws-event.en.md
@@ -1,9 +1,8 @@
---
-title: "At an AWS Hosted Event"
+title: "Start: At an AWS Hosted Event"
date: 2019-12-02T07:05:12-08:00
weight: 3
chapter: true
-pre: "Start: "
---
### Login to AWS Workshop Studio Portal
@@ -11,22 +10,22 @@ pre: "Start: "
1. If you are provided a one-click join link, use it and skip to step 3.
2. Visit [https://catalog.us-east-1.prod.workshops.aws](https://catalog.us-east-1.prod.workshops.aws). If you attended any other workshop earlier on this portal, please logout first. Click on **Get Started** on the right hand side of the window.
-![Workshop Studio Landing Page](/images/aws-ws-event1.png)
+![Workshop Studio Landing Page](/static/images/aws-ws-event1.png)
3. On the next, **Sign in** page, choose **Email One-Time Passcode (OTP)** to sign in to your workshop page.
-![Sign in page](/images/aws-ws-event2.png)
+![Sign in page](/static/images/aws-ws-event2.png)
4. Provide an email address to receive a one-time passcode.
-![Email address input](/images/aws-ws-event3.png)
+![Email address input](/static/images/aws-ws-event3.png)
5. Enter the passcode that you received in the provided email address, and click **Sign in**.
6. Next, in the textbox, enter the event access code (eg: abcd-012345-ef) that you received from the event facilitators. If you are provided a one-click join link, you will be redirected to the next step automatically.
-![Event access code](/images/aws-ws-event4.png)
+![Event access code](/static/images/aws-ws-event4.png)
7. Select on **I agree with the Terms and Conditions** on the bottom of the next page and click **Join event** to continue to the event dashboard.
8. On the event dashboard, click on **Open AWS console** to federate into AWS Management Console in a new tab. On the same page, click **Get started** to open the workshop instructions.
-![Event dashboard](/images/aws-ws-event5.png)
+![Event dashboard](/static/images/aws-ws-event5.png)
-9. Now that you are connected continue on to: [Step 1]({{< ref "design-patterns/setup/Step1" >}}).
+9. Now that you are connected continue on to: :link[Step 1]{href="/design-patterns/setup/Step1"}.
diff --git a/content/design-patterns/setup/index.en.md b/content/design-patterns/setup/index.en.md
new file mode 100644
index 00000000..7ebc8179
--- /dev/null
+++ b/content/design-patterns/setup/index.en.md
@@ -0,0 +1,16 @@
+---
+title: "Start here: Getting Started"
+date: 2019-12-02T07:05:12-08:00
+weight: 1
+chapter: true
+description: "Setup the lab environment and connect to the lab instance on Amazon EC2."
+---
+
+## GETTING STARTED
+
+To set up this workshop, choose one of the following links, depending on whether you are:
+- :link[…running the workshop on your own (in your own account)]{href="/design-patterns/setup/user-account"}, or
+- :link[…attending an AWS-hosted event (using AWS-provided access codes)]{href="/design-patterns/setup/aws-ws-event"}
+
+Once you have completed with either setup, continue on to:
+- :link[Exercise 1: DynamoDB Capacity Units and Partitioning]{href="/design-patterns/ex1capacity"}
diff --git a/content/design-patterns/setup/user-account.en.md b/content/design-patterns/setup/user-account.en.md
new file mode 100644
index 00000000..894c2293
--- /dev/null
+++ b/content/design-patterns/setup/user-account.en.md
@@ -0,0 +1,29 @@
+---
+title: "Start: On your own"
+date: 2019-12-02T07:05:12-08:00
+weight: 5
+chapter: true
+---
+
+::alert[These setup instructions are identical for LADV, LHOL, and LGME - all of which use the same Cloud9 template. Only complete this section once, and only if you're running it on your own account.]{type="warning"}
+
+::alert[Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re\:Invent, Immersion Day, etc), go to :link[At an AWS hosted Event]{href="/design-patterns/setup/aws-ws-event"}]
+
+## Launch the CloudFormation stack
+::alert[During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.]
+
+1. Launch the CloudFormation template in US West 2 to deploy the resources in your account: [![CloudFormation](/static/images/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/new?stackName=amazon-dynamodb-labs&templateURL=:param{key="design_patterns_s3_lab_yaml"})
+ 1. *Optionally, download [the YAML template](:param{key="design_patterns_s3_lab_yaml"}) and launch it your own way*
+
+1. Click *Next* on the first dialog.
+
+1. In the Parameters section, note the *Timeout* is set to zero. This means the Cloud9 instance will not sleep; you may want to change this manually to a value such as 60 to protect against unexpected charges if you forget to delete the stack at the end.
+ Leave the *WorkshopZIP* parameter unchanged and click *Next*
+![CloudFormation parameters](/static/images/awsconsole1.png)
+
+1. Scroll to the bottom and click *Next*, and then review the *Template* and *Parameters*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
+![CloudFormation parameters](/static/images/awsconsole2.png)
+ The stack will create a Cloud9 lab instance, a role for the instance, and a role for the AWS Lambda function used later on in the lab. It will use Systems Manager to configure the Cloud9 instance.
+
+
+1. After the CloudFormation stack is `CREATE_COMPLETE`, :link[continue onto Step 1]{href="/design-patterns/setup/Step1"}.
diff --git a/content/design-patterns/setup/user-account.md b/content/design-patterns/setup/user-account.md
deleted file mode 100644
index 5d15a880..00000000
--- a/content/design-patterns/setup/user-account.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: "On your own"
-date: 2019-12-02T07:05:12-08:00
-weight: 5
-chapter: true
-pre: "Start: "
----
-
-{{% notice warning %}}
-These setup instructions are identitical for LADV, LHOL, and LGME - all of which use the same Cloud9 template.
-Only complete this section once, and only if you're running it on your own account.
-{{% /notice %}}
-
-{{% notice info %}}
-Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re:Invent, Immersion Day, etc), go to [At an AWS hosted Event]({{< ref "design-patterns/setup/aws-ws-event">}})
-{{% /notice %}}
-
-## Launch the CloudFormation stack
-{{% notice warning %}}
-During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.
-{{% /notice %}}
-
-1. Launch the CloudFormation template in US East 1 to deploy the resources in your account:
-
- *Optionally, download [the YAML template]({{% siteparam "design_patterns_s3_lab_yaml" %}}) and launch it your own way*
-
-1. Click *Next* on the first dialog.
-
-1. In the Parameters section, note the *Timeout* is set to zero. This means the Cloud9 instance will not sleep; you may want to change this manually to a value such as 60 to protect against unexpected charges if you forget to delete the stack at the end.
- Leave the *WorkshopZIP* parameter unchanged and click *Next*
-![CloudFormation parameters](/images/awsconsole1.png)
-
-1. Scroll to the bottom and click *Next*, and then review the *Template* and *Parameters*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
-![CloudFormation parameters](/images/awsconsole2.png)
- The stack will create a Cloud9 lab instance, a role for the instance, and a role for the AWS Lambda function used later on in the lab. It will use Systems Manager to configure the Cloud9 instance.
-
-
-1. After the CloudFormation stack is `CREATE_COMPLETE`, [continue onto Step 1]({{< ref "design-patterns/setup/Step1" >}}).
diff --git a/content/event-driven-architecture/ex1overview/Step1.en.md b/content/event-driven-architecture/ex1overview/Step1.en.md
index 31330c72..a2dc64e7 100644
--- a/content/event-driven-architecture/ex1overview/Step1.en.md
+++ b/content/event-driven-architecture/ex1overview/Step1.en.md
@@ -4,12 +4,8 @@ date: 2019-12-02T10:26:23-08:00
weight: 2
---
-{{% notice info %}}
-Points and scoreboard only apply when this lab is run during an AWS Event. If you're running this lab independently, you have the top score!
-{{% /notice %}}
-{{% notice info %}}
-AWS Event: Be sure you've set a family-friendly team name in Workshop Studio, especially if you think you'll be on the top of the scoreboard!
-{{% /notice %}}
+::alert[Points and scoreboard only apply when this lab is run during an AWS Event. If you're running this lab independently, you have the top score!]
+::alert[AWS Event: Be sure you've set a family-friendly team name in Workshop Studio, especially if you think you'll be on the top of the scoreboard!]
To make this workshop more interesting we introduced a scoring element! As you move through the workshop and fix the pipeline you are awarded points for successfully aggregating messages.
@@ -23,6 +19,6 @@ However, once you reach 300 points we're increasing the difficulty for you. In *
The workshop operators will display the scoring of all participants during the workshop, but you can check the scoreboard on your own.
-Continue on to: [Connect the Pipeline]({{< ref "event-driven-architecture/ex2pipeline" >}})
+Continue on to: :link[Connect the Pipeline]{href="/event-driven-architecture/ex2pipeline"}
-or: [Optional - Pipeline Deep Dive]({{< ref "event-driven-architecture/ex1overview/step2.html" >}})
+or: :link[Optional - Pipeline Deep Dive]{href="/event-driven-architecture/ex1overview/step2"}
diff --git a/content/event-driven-architecture/ex1overview/Step2.en.md b/content/event-driven-architecture/ex1overview/Step2.en.md
index ac65366a..6ef8ffa6 100644
--- a/content/event-driven-architecture/ex1overview/Step2.en.md
+++ b/content/event-driven-architecture/ex1overview/Step2.en.md
@@ -4,13 +4,11 @@ date: 2019-12-02T10:26:28-08:00
weight: 3
---
-{{% notice info %}}
-Understanding how the pipeline works internally is recommended, however reading this page is not mandatory to complete the workshop.
-{{% /notice %}}
+::alert[Understanding how the pipeline works internally is recommended, however reading this page is not mandatory to complete the workshop.]
This section explains in detail how the pipeline works from end-to-end. For a simplified explanation refer to the figure below where we have divided the pipeline into three stages. For each stage we outline the input and the output of the stage.
-![DD Stages](/images/event-driven-architecture/deep-dive/stages.png)
+![DD Stages](/static/images/event-driven-architecture/deep-dive/stages.png)
## Stage 1: 'State'
@@ -37,7 +35,7 @@ The business problem of near real-time data aggregation is faced by customers in
Consider an example where five risk messages are ingested in the pipeline, as represented in the following figure. For visibility we labeled each message with an identifier from M1 to M5. Each message has a unique `TradeID`, a risk `Value`, and a group of hierarchy attributes (as explained above). For simplicity, all messages have the `RiskType` `"Delta"` and the `Version` attribute is always set to `1`.
-![DD Part1](/images/event-driven-architecture/deep-dive/pipeline-explanation-part-1.png)
+![DD Part1](/static/images/event-driven-architecture/deep-dive/pipeline-explanation-part-1.png)
The pipeline is event driven by an upstream data source that writes records into a Kinesis Data Stream, and the `StateLambda` function is invoked to process these messages further.
@@ -54,7 +52,7 @@ In order to handle a high volume of incoming messages, multiple `MapLambda` inst
The responsibility of the `MapLambda` is to perform initial pre-aggregation of the messages, or more specifically to perform arithmetic summation based on the message attributes. The pre-aggregated output of each `MapLambda` function is written into the `ReduceTable` as a single row, as seen in "Output state of the `ReduceTable`" in the above figure. For simplicity, we refer to these rows as AM1 and AM2.
-![DD Part2](/images/event-driven-architecture/deep-dive/pipeline-explanation-part-2.png)
+![DD Part2](/static/images/event-driven-architecture/deep-dive/pipeline-explanation-part-2.png)
### How does the pre-aggregation work?
@@ -83,6 +81,6 @@ Note: there is only one instance of the `ReduceLambda` function, which is achiev
The final output in the `AggregateTable` closely resembles the `Hierarchy` attribute elements, and can be easily read and displayed by a front-end!
-![DD Part3](/images/event-driven-architecture/deep-dive/pipeline-explanation-part-3.png)
+![DD Part3](/static/images/event-driven-architecture/deep-dive/pipeline-explanation-part-3.png)
-Continue on to: [Connect the Pipeline]({{< ref "event-driven-architecture/ex2pipeline" >}})
+Continue on to: :link[Connect the Pipeline]{href="/event-driven-architecture/ex2pipeline"}
diff --git a/content/event-driven-architecture/ex1overview/_index.en.md b/content/event-driven-architecture/ex1overview/index.en.md
similarity index 84%
rename from content/event-driven-architecture/ex1overview/_index.en.md
rename to content/event-driven-architecture/ex1overview/index.en.md
index a6847eab..b33ad26e 100644
--- a/content/event-driven-architecture/ex1overview/_index.en.md
+++ b/content/event-driven-architecture/ex1overview/index.en.md
@@ -15,13 +15,13 @@ The workshop contains two labs. The objective of the first lab is to establish c
However, the pipeline you build in the first lab doesn't ensure exactly once message processing. Therefore, establishing exactly once processing is the goal of *Lab 2*.
-![Architecture-1](/images/event-driven-architecture/architecture/before-lab-1.png)
+![Architecture-1](/static/images/event-driven-architecture/architecture/before-lab-1.png)
## Lab 1
The diagram below outlines a set of steps that you will need to perform in order to connect all the AWS resources.
The *Lab 1* section will give you more information and will explain how to perform each step.
-![Architecture-2](/images/event-driven-architecture/architecture/after-lab-1.png)
+![Architecture-2](/static/images/event-driven-architecture/architecture/after-lab-1.png)
## Lab 2
@@ -29,7 +29,7 @@ The *Lab 1* section will give you more information and will explain how to perfo
In *Lab 2*, we will enhance the pipeline to ensure exactly once processing for any ingested message. To make sure that our pipeline can withstand different failure modes and achieve exactly once message processing we will modify two Lambda functions.
The *Lab 2* section will give you more information and will explain how to perform each step.
-![Architecture-3](/images/event-driven-architecture/architecture/after-lab-2.png)
+![Architecture-3](/static/images/event-driven-architecture/architecture/after-lab-2.png)
## Next steps and competition
@@ -37,4 +37,4 @@ The *Pipeline deep dive with example* subsection contains a detailed explanation
To make this workshop more exciting, when run at an AWS event all participants are rated on how many messages they can aggregate correctly using a scoreboard. The *Game rules* subsection outlines the rules of the game, and how the `GeneratorLambda` function ingests data into the start of the pipeline!
-Continue on to: [Game Rules]({{< ref "event-driven-architecture/ex1overview/step1.html" >}}).
+Continue on to: :link[Game Rules]{href="/event-driven-architecture/ex1overview/step1"}.
diff --git a/content/event-driven-architecture/ex2pipeline/Step1.en.md b/content/event-driven-architecture/ex2pipeline/Step1.en.md
index 158f3a16..6389f941 100644
--- a/content/event-driven-architecture/ex2pipeline/Step1.en.md
+++ b/content/event-driven-architecture/ex2pipeline/Step1.en.md
@@ -8,7 +8,7 @@ The objective of this step is to connect the `StateLambda` function with the `In
When new messages become available in the Kinesis stream, the `StateLambda` function will be invoked to process the stream records. Each stream record contains a single trade.
-![Architecture-1](/images/event-driven-architecture/architecture/step1.png)
+![Architecture-1](/static/images/event-driven-architecture/architecture/step1.png)
## Connect the StateLambda function with a Kinesis Data Stream
@@ -16,14 +16,14 @@ When new messages become available in the Kinesis stream, the `StateLambda` func
2. Click on the `StateLambda` function to edit its configuration. See the figure below.
3. The function overview shows that the `StateLambda` function doesn't have any triggers yet. Click on the `Add trigger` button.
-![Architecture-1](/images/event-driven-architecture/lab1/add-trigger-state-lambda.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/add-trigger-state-lambda.png)
4. Specify the following configuration (see the figure below for details):
- In the first drop down select `Kinesis` as the data source.
- For the Kinesis stream, select `IncomingDataStream`.
- Set the `Batch size` to `100`.
-![Architecture-1](/images/event-driven-architecture/lab1/connect-state-lambda.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/connect-state-lambda.png)
5. Click `Add` in the bottom right corner to create and enable an event source mapping on the Lambda function.
@@ -33,12 +33,12 @@ At this point you have configured an event based connection between Kinesis Data
If everything was done correctly then the `StateLambda` function will be invoked with stream records from the Kinesis stream. Therefore, in one to two minutes you should be able to see logs from the Lambda invocations under the `Monitor` in the `Logs` tab.
-![Architecture-1](/images/event-driven-architecture/lab1/state-lambda-logs.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/state-lambda-logs.png)
You can also observe the outputs of `StateLambda` to verify the connection by reviewing the `Items` section of the DynamoDB console. To do that, navigate to the DynamoDB service in the AWS console, click `Items` on the left, and select `StateTable`.
At this stage you should see multiple rows similar to the image below. The number of items returned may vary. You can click on the orange `Run` button if you want to refresh the items.
-![Architecture-1](/images/event-driven-architecture/lab1/state-table-data.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/state-table-data.png)
-Continue on to: [Step 2]({{< ref "event-driven-architecture/ex2pipeline/step2" >}}).
+Continue on to: :link[Step 2]{href="/event-driven-architecture/ex2pipeline/step2"}.
diff --git a/content/event-driven-architecture/ex2pipeline/Step2.en.md b/content/event-driven-architecture/ex2pipeline/Step2.en.md
index 7681189b..731dda6d 100644
--- a/content/event-driven-architecture/ex2pipeline/Step2.en.md
+++ b/content/event-driven-architecture/ex2pipeline/Step2.en.md
@@ -4,11 +4,9 @@ date: 2019-12-02T10:35:42-08:00
weight: 2
---
-{{% notice info %}}
-The `MapLambda` function is already connected for you, so let us quickly check if it works as expected!
-{{% /notice %}}
+::alert[The `MapLambda` function is already connected for you, so let us quickly check if it works as expected!]
-![Architecture-1](/images/event-driven-architecture/architecture/step2.png)
+![Architecture-1](/static/images/event-driven-architecture/architecture/step2.png)
Check that `MapLambda` has a correctly configured trigger to receive messages from the `StateTable` stream:
@@ -16,7 +14,7 @@ Check that `MapLambda` has a correctly configured trigger to receive messages fr
2. Click on the `MapLambda` function to view its configuration.
3. Verify that the `MapLambda` function has a DynamoDB trigger and this trigger points to the `StateTable` (see figure below).
-![Architecture-1](/images/event-driven-architecture/target/TargetMapLambda.png)
+![Architecture-1](/static/images/event-driven-architecture/target/TargetMapLambda.png)
## How do you know it is working?
@@ -24,6 +22,6 @@ Any row written to the `StateTable` should trigger the `MapLambda` function. The
Alternatively, you can observe the outputs of the `MapLambda` function in the DynamoDB `ReduceTable`. To do that, navigate to the DynamoDB service in the AWS console, click `Items` on the left, and select `ReduceTable`. At this stage you should see multiple rows similar to the image below.
-![Reduce table items](/images/event-driven-architecture/lab1/reduce-table-items.png)
+![Reduce table items](/static/images/event-driven-architecture/lab1/reduce-table-items.png)
-Continue on to: [Step 3]({{< ref "event-driven-architecture/ex2pipeline/step3" >}}).
+Continue on to: :link[Step 3]{href="/event-driven-architecture/ex2pipeline/step3"}.
diff --git a/content/event-driven-architecture/ex2pipeline/Step3.en.md b/content/event-driven-architecture/ex2pipeline/Step3.en.md
index 804422f9..75fc2802 100644
--- a/content/event-driven-architecture/ex2pipeline/Step3.en.md
+++ b/content/event-driven-architecture/ex2pipeline/Step3.en.md
@@ -6,7 +6,7 @@ weight: 3
The objective of this last step in *Lab 1* is to correctly configure the `ReduceLambda` function, connect it to the DynamoDB stream of `ReduceTable`, and ensure the total aggregates are written to the `AggregateTable`. When you successfully complete this step, you will begin to accumulate points on the scoreboard.
-![Architecture-1](/images/event-driven-architecture/architecture/step3.png)
+![Architecture-1](/static/images/event-driven-architecture/architecture/step3.png)
## Configure Lambda concurrency
@@ -19,14 +19,14 @@ From a performance point-of-view, a single Lambda instance can handle the aggreg
4. Click the `Edit` button in the top right corner, select `Reserve concurrency` and enter `1`.
5. After you clicked `Save`, your configuration should look like the image below.
-![Architecture-1](/images/event-driven-architecture/lab1/reduce-lambda-concurrency.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/reduce-lambda-concurrency.png)
## Connect the ReduceLambda to the ReduceTable stream
Next, we want to connect the `ReduceLambda` function to the DynamoDB stream of the `ReduceTable`.
1. The function overview shows that the `ReduceLambda` function does not have a trigger. Click on the button `Add trigger`.
-![Architecture-1](/images/event-driven-architecture/lab1/add-trigger-reduce-lambda.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/add-trigger-reduce-lambda.png)
2. Specify the following configuration:
- In the drop down select `DynamoDB` as the data source.
@@ -35,11 +35,9 @@ Next, we want to connect the `ReduceLambda` function to the DynamoDB stream of t
3. Click the `Add` button in the bottom right corner.
-{{% notice warning %}}
-You will see an error here! Before we can enable this trigger we need to add IAM permissions to this Lambda functions.
-{{% /notice %}}
+::alert[You will see an error here! Before we can enable this trigger we need to add IAM permissions to this Lambda function.]{type="warning"}
-![Architecture-1](/images/event-driven-architecture/lab1/reduce-lambda-error-permissions.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1/reduce-lambda-error-permissions.png)
## Add required IAM permissions
@@ -49,16 +47,14 @@ The error message above informs you that the `ReduceLambda` function doesn't hav
2. Open a new browser tab, go the AWS Lambda service and select the `ReduceLambda` function.
3. Navigate to the `Configuration` tab and click on `Permissions`. You should see the Lambda execution role called `ReduceLambdaRole`. Click on this role to modify it.
-![Architecture-1](/images/event-driven-architecture/lab1-permissions/click_on_role.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1-permissions/click_on_role.png)
4. Now you're redirected to the IAM service, where you see the details of the `ReduceLambdaRole`. There is a policy associated with this role, the `ReduceLambdaPolicy`. Expand the view to see the current permissions of the `ReduceLambda` function. Now, click on the button `Edit policy` to add additional permissions.
-![Architecture-1](/images/event-driven-architecture/lab1-permissions/click_on_edit_policy.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1-permissions/click_on_edit_policy.png)
### Modify the IAM policy
-{{% notice info %}}
-There is already an IAM permission in place for DynamoDB: this is necessary to ensure the workshop runs as expected. Don't get confused by this and please don't delete the permissions we've already granted! All of the Lambda functions need to be able to access the ParameterTable to check the current progress of the lab and the respective failure modes.
-{{% /notice %}}
+::alert[There is already an IAM permission in place for DynamoDB: this is necessary to ensure the workshop runs as expected. Don't get confused by this and please don't delete the permissions we've already granted! All of the Lambda functions need to be able to access the ParameterTable to check the current progress of the lab and the respective failure modes.]
* First we need to add permissions so the `ReduceLambda` function is able to read messages from the stream of the `ReduceTable`.
* Click on `Add additional permissions`
@@ -66,7 +62,7 @@ There is already an IAM permission in place for DynamoDB: this is necessary to e
* At `Actions`, under `Access level`, expand `Read`
* Check the following four checkboxes: `DescribeStream`, `GetRecords`, `GetShardIterator`, and `ListStreams`
-![Architecture-1](/images/event-driven-architecture/lab1-permissions/add_permissions.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1-permissions/add_permissions.png)
Now we need to associate these permissions with specific resources (e.g. we want the `ReduceLambda` to be able to read exclusively from the `ReduceTable` alone). Hence, expand `Resources`, and click on `Add ARN to restrict access`. Next, fill out the following:
* `Region` - The lab defaults to us-west-1, but verify your region and ensure the correct one is entered
@@ -75,23 +71,21 @@ Now we need to associate these permissions with specific resources (e.g. we want
* `Stream label` - Ensure the `Any` box is checked so that any stream label is supported. A Stream label is a unique identifier for a DynamoDB stream.
* Finally, click on `Add`. You've now granted permission for the `ReduceLambda` to read from the `ReduceTable` stream, but there is more to be done still.
-{{% notice info %}}
-The pre-filled value for `Account` is your AWS Account ID: This is already the correct value for this field, so please don't change it.
-{{% /notice %}}
+::alert[The pre-filled value for `Account` is your AWS Account ID: This is already the correct value for this field, so please don't change it.]
-![Architecture-1](/images/event-driven-architecture/lab1-permissions/resource_stream.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1-permissions/resource_stream.png)
If we make no further change, the `ReduceLambda` function will not be able to update the final results in the `AggregateTable`. We must modify the policy to add additional permissions to grant `UpdateItem` access to the function.
* Click on `Add additional permissions`
* For `Service`, select `DynamoDB`
* At `Actions`, under `Access level`, expand `Write`
* Select the checkbox `UpdateItem`
-![Architecture-1](/images/event-driven-architecture/lab1-permissions/add-permissions-write.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1-permissions/add-permissions-write.png)
* Again, we want to associate theses permissions with a specific resources: We want the `ReduceLambda` to be able to write to the `AggregateTable` alone. Hence, expand `Resources`, and click on `Add ARN to restrict access`. Next, enter the values for `Region` (using the same region as before), `Account` (leave the pre-filled account id), and `Table name` (this time it should be `AggregateTable`).
* Click `Add`.
-![Architecture-1](/images/event-driven-architecture/lab1-permissions/resource_stream_2.png)
+![Architecture-1](/static/images/event-driven-architecture/lab1-permissions/resource_stream_2.png)
* Finally, click `Review Policy` and then `Save changes` in the bottom right corner.
@@ -99,9 +93,7 @@ If we make no further change, the `ReduceLambda` function will not be able to up
If all of the above steps are executed correctly you will be able to connect the `ReduceLambda` to the DynamoDB stream of the `ReduceTable` by switching back to the open tab and again trying to click on `Add`. You may need to wait a couple of seconds for the IAM policy changes to propagate.
-{{% notice info %}}
-If you're not able to add the trigger, this may be due to a misconfiguration of the IAM policy. If you need help, go to `Summary & Conclusions` on the left, then `Solutions`, and you should see the desired `ReduceLambdaPolicy`.
-{{% /notice %}}
+::alert[If you're not able to add the trigger, this may be due to a misconfiguration of the IAM policy. If you need help, go to `Summary & Conclusions` on the left, then `Solutions`, and you should see the desired `ReduceLambdaPolicy`.]
## How do you know it is working?
@@ -109,10 +101,8 @@ If everything was done right, then the DynamoDB stream of the `ReduceTable` shou
Another way to verify it is working is to observe the items written by `ReduceLambda` to the DynamoDB table `AggregateTable`. To do that, navigate to the DynamoDB service in the AWS console, click `Items` on the left, and select `AggregateTable`. At this stage you should see multiple rows similar to the image below.
-![AggregateTable items](/images/event-driven-architecture/lab1/aggregate-table-items.png)
+![AggregateTable items](/static/images/event-driven-architecture/lab1/aggregate-table-items.png)
-{{% notice info %}}
-AWS Event: If Steps 1, 2, and 3 of *Lab 1* were completed successfully you should start gaining score points within one to two minutes. Please check the scoreboard! Ask your lab moderator to provide a link to the scoreboard.
-{{% /notice %}}
+::alert[AWS Event: If Steps 1, 2, and 3 of *Lab 1* were completed successfully you should start gaining score points within one to two minutes. Please check the scoreboard! Ask your lab moderator to provide a link to the scoreboard.]
-Continue on to: [Lab 2]({{< ref "event-driven-architecture/ex3fixbugs" >}})
+Continue on to: :link[Lab 2]{href="/event-driven-architecture/ex3fixbugs"}
diff --git a/content/event-driven-architecture/ex2pipeline/_index.en.md b/content/event-driven-architecture/ex2pipeline/index.en.md
similarity index 97%
rename from content/event-driven-architecture/ex2pipeline/_index.en.md
rename to content/event-driven-architecture/ex2pipeline/index.en.md
index 6b0ada7b..bc5e110f 100644
--- a/content/event-driven-architecture/ex2pipeline/_index.en.md
+++ b/content/event-driven-architecture/ex2pipeline/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Connect the pipeline"
+title: "Lab 1: Connect the pipeline"
date: 2019-12-02T10:17:10-08:00
weight: 3
chapter: true
-pre: "Lab 1: "
description: "Process streaming data to create an end-to-end data processing pipeline"
---
@@ -18,7 +17,7 @@ Amazon DynamoDB is a massive horizontally scaled NoSQL database designed for sin
This diagram shows the features and integrations for DynamoDB:
-![DynamoDB ecosystem](/images/event-driven-architecture/lab1/dynamodb-ecosystem.png)
+![DynamoDB ecosystem](/static/images/event-driven-architecture/lab1/dynamodb-ecosystem.png)
If you want to learn more [you can review the DynamoDB's features page on aws.amazon.com](https://aws.amazon.com/dynamodb/features/)
@@ -58,4 +57,4 @@ The Lambda service supports `Lambda function` triggers through what is called a
Now that you've learned way more about Lambda and DynamoDB than you thought possible (or perhaps more conservatively, you've skim-read more text than you expected to see on this page), let's begin Lab 1 in earnest!
-Continue on to: [Step 1]({{< ref "event-driven-architecture/ex2pipeline/step1" >}})
+Continue on to: :link[Step 1]{href="/event-driven-architecture/ex2pipeline/step1"}
diff --git a/content/event-driven-architecture/ex3fixbugs/Step1.en.md b/content/event-driven-architecture/ex3fixbugs/Step1.en.md
index bce2e73c..dfaa229e 100644
--- a/content/event-driven-architecture/ex3fixbugs/Step1.en.md
+++ b/content/event-driven-architecture/ex3fixbugs/Step1.en.md
@@ -6,7 +6,7 @@ weight: 1
The objective of this step is to modify the `StateLambda` function such that it does not successfully write duplicate messages to downstream resources.
-![Architecture-1](/images/event-driven-architecture/architecture/lab2-step1.png)
+![Architecture-1](/static/images/event-driven-architecture/architecture/lab2-step1.png)
## Study the StateLambda code
@@ -83,9 +83,7 @@ The code in line 5 adds a compound condition that ensures an item is only insert
To explain, the condition first states that at the moment of data insertion the table *should not* contain an item with partition key `pk` equal to the `record_id` or else the write should fail (see [the attribute_not_exists function](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html#Expressions.OperatorsAndFunctions.Functions)), implying this is the first time such a item/message is inserted. Then, with the inclusion of the *OR* keyword the condition says that if a row is already present in the table and the version number of the row being inserted is greater than the current row then the write should succeed.
-{{% notice info %}}
-You don't need to change anything in your Lambda code yet, this will come in just a minute if you read on.
-{{% /notice %}}
+::alert[You don't need to change anything in your Lambda code yet, this will come in just a minute if you read on.]
### Why does it work?
@@ -135,12 +133,10 @@ except ClientError as e:
Copy the code snippet above and replace it with the existing `table.update_item(...)` statement in your `StateLambda` function code. Then click on `Deploy` to apply the changes.
-{{% notice info %}}
-The above change will also help avoid duplicate writes when the Lambda service retries the `StateLambda` function after it has previously failed with a batch of incoming messages. With this change we avoid writing duplicates into `StateTable` which ensures we do not generate additional messages in the downstream `StateTable` DynamoDB stream.
-{{% /notice %}}
+::alert[The above change will also help avoid duplicate writes when the Lambda service retries the `StateLambda` function after it has previously failed with a batch of incoming messages. With this change we avoid writing duplicates into `StateTable` which ensures we do not generate additional messages in the downstream `StateTable` DynamoDB stream.]
## How do you know you fixed it?
Navigate to `StateLambda` and open `Logs` under the `Monitor` tab. Check the log messages by clicking on the hyperlinked LogStream cell and validate that you see the following string in the log lines: `Conditional put failed. This is either a duplicate...`. This message is produced by the exception handling code above. This tells us that the conditional expression is working as expected.
-Continue on to: [Step 2]({{< ref "event-driven-architecture/ex3fixbugs/step2" >}})
+Continue on to: :link[Step 2]{href="/event-driven-architecture/ex3fixbugs/step2"}
diff --git a/content/event-driven-architecture/ex3fixbugs/Step2.en.md b/content/event-driven-architecture/ex3fixbugs/Step2.en.md
index f9cb4a21..8a214f9a 100644
--- a/content/event-driven-architecture/ex3fixbugs/Step2.en.md
+++ b/content/event-driven-architecture/ex3fixbugs/Step2.en.md
@@ -10,7 +10,7 @@ The objective of this step is to modify the `ReduceLambda` function to ensure id
We compute the hash over all messages in the batch that the Lambda function is invoked with to use as the `ClientRequestToken`. Lambda ensures that the function is retried with the same batch of messages on failure. Therefore, by ensuring that all code paths in the Lambda function are deterministic we can ensure idempotency of the transactions and achieve exactly once processing at this last stage of the pipeline. This method has a weakness because we only protect against re-processed messages within a 10-minute window after the first completed `TransactWriteItems` call since a `ClientRequestToken` is valid for no more than 10 minutes, as outlined [in the official documentation](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html#DDB-TransactWriteItems-request-ClientRequestToken).
-![Architecture-1](/images/event-driven-architecture/architecture/lab2-step2.png)
+![Architecture-1](/static/images/event-driven-architecture/architecture/lab2-step2.png)
1. Navigate to the AWS Lambda service within the AWS Management console.
2. Click on the `ReduceLambda` function to edit its configuration.
@@ -88,11 +88,9 @@ Apply these changes to your Lambda function code, either manually or just by cop
Check your scoreboard. If all the previous steps are completed successfully you should start accumulating a score above 300 points. If not, check the CloudWatch Logs of the `ReduceLambda` function to check for any errors. If you see any errors, they may provide a hint on how to fix them. If you need help, go to `Summary & Conclusions` on the left, then `Solutions`, and you can see the desired code of the `ReduceLambda`.
-{{% notice info %}}
-Even if you've done everything correctly, the error rate won't drop to zero! The manually induced failures will still be there, but now the pipeline is able to sustain them and still ensure consistent aggregation.
-{{% /notice %}}
+::alert[Even if you've done everything correctly, the error rate won't drop to zero! The manually induced failures will still be there, but now the pipeline is able to sustain them and still ensure consistent aggregation.]
-Continue on to: [Summary & Conclusions]({{< ref "event-driven-architecture/ex4summary" >}})
+Continue on to: :link[Summary & Conclusions]{href="/event-driven-architecture/ex4summary"}
-or: [Optional: Add a simple Python frontend]({{< ref "event-driven-architecture/ex3fixbugs/step3" >}})
+or: :link[Optional: Add a simple Python frontend]{href="/event-driven-architecture/ex3fixbugs/step3"}
diff --git a/content/event-driven-architecture/ex3fixbugs/Step3.en.md b/content/event-driven-architecture/ex3fixbugs/Step3.en.md
index c1ac8e08..47e33373 100644
--- a/content/event-driven-architecture/ex3fixbugs/Step3.en.md
+++ b/content/event-driven-architecture/ex3fixbugs/Step3.en.md
@@ -8,11 +8,9 @@ Congratulations, you successfully completed *Lab 2*! If you've made it here and
In this step, you will start an AWS Cloud9 instance and run a Python frontend that scans the DynamoDB `AggregateTable` twice per second and displays the results, i.e. the aggregated values for the different risk types, in your terminal.
-![Architecture-1](/images/event-driven-architecture/lab2/lab2-optional.png)
+![Architecture-1](/static/images/event-driven-architecture/lab2/lab2-optional.png)
-{{% notice info %}}
-AWS Cloud9 is by far not the only option to run this Python-based frontend! If you're motivated, feel free to try running it locally on your PC (make sure you run `aws configure` first - you can get your credentials on the EventEngine dashboard), or from any EC2 instance (e.g. assign an IAM role to the EC2 instance that allows access to the `AggregateTable`).
-{{% /notice %}}
+::alert[AWS Cloud9 is by far not the only option to run this Python-based frontend! If you're motivated, feel free to try running it locally on your PC (make sure you run `aws configure` first - you can get your credentials on the EventEngine dashboard), or from any EC2 instance (e.g. assign an IAM role to the EC2 instance that allows access to the `AggregateTable`).]
## Start an AWS Cloud9 instance
Since you're clearly experienced with AWS - making it through all of the lab - we'll leave this task up to you!
@@ -26,10 +24,8 @@ pip3 install --user boto3
Now, you can copy the code below into a file, give it a descriptive name (e.g. `frontend.py`) and run it (`python3 frontend.py`). You should see the current aggregates from the `AggregationTable`, with new messages coming in every 60 seconds!
-{{% notice info %}}
-Edit the script to update the correct region value of **REGION_NAME** as per the region you are running the lab in. Use the value in **Code** from [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions).
-For example, for Ireland, enter **eu-west-1**.
-{{% /notice %}}
+::alert[Edit the script to update the correct region value of **REGION_NAME** as per the region you are running the lab in. Use the value in **Code** from [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions).
+For example, for Ireland, enter **eu-west-1**.]
```python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/content/event-driven-architecture/ex3fixbugs/_index.en.md b/content/event-driven-architecture/ex3fixbugs/index.en.md
similarity index 73%
rename from content/event-driven-architecture/ex3fixbugs/_index.en.md
rename to content/event-driven-architecture/ex3fixbugs/index.en.md
index 1bee8c2b..0861b97e 100644
--- a/content/event-driven-architecture/ex3fixbugs/_index.en.md
+++ b/content/event-driven-architecture/ex3fixbugs/index.en.md
@@ -1,15 +1,12 @@
---
-title: "Ensure fault tolerance and exactly once processing"
+title: "Lab 2: Ensure fault tolerance and exactly once processing"
date: 2019-12-02T10:17:22-08:00
weight: 4
chapter: true
-pre: "Lab 2: "
description: "Query a sharded global secondary index to quickly read sorted data by status code and date."
---
-{{% notice info %}}
-Points and scoreboard only apply when this lab is run during an AWS Event.
-{{% /notice %}}
+::alert[Points and scoreboard only apply when this lab is run during an AWS Event.]
# Are you ready to start Lab 2?
@@ -18,17 +15,15 @@ Before proceeding to *Lab 2* let's verify that *Lab 1* was successfully complete
* Second, you need to accumulate 300 points to continue. The workshop will automatically switch to *Lab 2* when you reach this milestone, and this phase is complete.
* Once 300 points are accumulated, new failure modes will be introduced and all three Lambda functions (`StateLambda`, `MapLambda`, and `ReduceLambda`) will start failing randomly. This is a pre-programmed evolution of the workshop. In the Lambda console, click on any of the three Lambda functions, navigate to the `Monitor` tab and then to the `Metrics` sub-tab. You should expect to see a non-zero error rate on some of the graphs!
-{{% notice info %}}
-If the dashboard has 300 points, then congratulations: you can start Lab 2!
-{{% /notice %}}
+::alert[If the dashboard has 300 points, then congratulations: you can start Lab 2!]
-![Architecture-1](/images/event-driven-architecture/lab2/failing-lambdas.png)
+![Architecture-1](/static/images/event-driven-architecture/lab2/failing-lambdas.png)
# Let’s utilize different features of DynamoDB to ensure data integrity and fault tolerance
In *Lab 2* we will achieve exactly once processing of the messages, making sure that our pipeline can withstand different failure modes.
-![Architecture-3](/images/event-driven-architecture/architecture/after-lab-2.png)
+![Architecture-3](/static/images/event-driven-architecture/architecture/after-lab-2.png)
-Continue on to: [Step 1]({{< ref "event-driven-architecture/ex3fixbugs/step1" >}})
+Continue on to: :link[Step 1]{href="/event-driven-architecture/ex3fixbugs/step1"}
diff --git a/content/event-driven-architecture/ex4summary/_index.en.md b/content/event-driven-architecture/ex4summary/index.en.md
similarity index 91%
rename from content/event-driven-architecture/ex4summary/_index.en.md
rename to content/event-driven-architecture/ex4summary/index.en.md
index e83dd597..678f23f7 100644
--- a/content/event-driven-architecture/ex4summary/_index.en.md
+++ b/content/event-driven-architecture/ex4summary/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Summary & Conclusions"
+title: "Summary: Conclusions"
date: 2019-12-02T10:17:33-08:00
weight: 5
chapter: true
-pre: "Summary: "
description: "Explore how to maintain the ability to query on many attributes when you have a multi-entity table."
---
@@ -26,4 +25,4 @@ While this workshop is open-sourced, the blogs were released with a GitHub repos
## Solutions
-[View Solutions]({{< ref "event-driven-architecture/ex4summary/step1" >}})
+:link[View Solutions]{href="/event-driven-architecture/ex4summary/step1"}
diff --git a/content/event-driven-architecture/_index.en.md b/content/event-driven-architecture/index.en.md
similarity index 94%
rename from content/event-driven-architecture/_index.en.md
rename to content/event-driven-architecture/index.en.md
index 5819046a..ac1ff24e 100644
--- a/content/event-driven-architecture/_index.en.md
+++ b/content/event-driven-architecture/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Build a Serverless Event Driven Architecture with DynamoDB"
+title: "LEDA: Build a Serverless Event Driven Architecture with DynamoDB"
chapter: true
description: "400 level: Hands-on exercise using Python and DynamoDB Streams."
-pre: "LEDA: "
-weight: 1
+weight: 15
---
In this workshop you will be presented with a serverless event-driven data aggregation pipeline. It's built with [AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html), [Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html), and [Amazon Kinesis Data Streams](https://docs.aws.amazon.com/streams/latest/dev/introduction.html).
@@ -13,7 +12,7 @@ Over the course of two labs you will have to first connect all the elements of t
Here's what this workshop includes:
-{{% children depth="1" description="true" %}}
+::children{depth=1}
### Target audience
diff --git a/content/event-driven-architecture/setup/_index.en.md b/content/event-driven-architecture/setup/_index.en.md
deleted file mode 100644
index bdb00d1b..00000000
--- a/content/event-driven-architecture/setup/_index.en.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: "Setup"
-date: 2019-12-02T07:05:12-08:00
-weight: 1
-chapter: true
-pre: "Start here: "
-description: "Setup the lab environment."
----
-
-## GETTING STARTED
-
-To set up this workshop, choose one of the following links, depending on whether you are:
-- […running the workshop on your own (in your own account)]({{< ref "start-here/user-account/" >}}), or
-- [...attending an AWS-hosted event (using AWS-provided access codes)]({{< ref "start-here/aws-ws-event/" >}}), or
-
-Once you have completed with either setup, continue on to:
-- [Exercise 1: Overview]({{< ref "/event-driven-architecture/ex1overview" >}})
diff --git a/content/event-driven-architecture/setup/start-here/aws-ws-event.md b/content/event-driven-architecture/setup/aws-ws-event.en.md
similarity index 73%
rename from content/event-driven-architecture/setup/start-here/aws-ws-event.md
rename to content/event-driven-architecture/setup/aws-ws-event.en.md
index fdf6867a..044101a6 100644
--- a/content/event-driven-architecture/setup/start-here/aws-ws-event.md
+++ b/content/event-driven-architecture/setup/aws-ws-event.en.md
@@ -1,9 +1,8 @@
---
-title: "At an AWS Hosted Event"
+title: "Start: At an AWS Hosted Event"
date: 2019-12-02T07:05:12-08:00
weight: 4
chapter: true
-pre: "Start: "
---
### Login to AWS Workshop Studio Portal
@@ -11,22 +10,22 @@ pre: "Start: "
1. If you are provided a one-click join link, skip to step 3.
2. Visit [https://catalog.us-east-1.prod.workshops.aws](https://catalog.us-east-1.prod.workshops.aws). If you attended any other workshop earlier on this portal, please logout first. Click on **Get Started** on the right hand side of the window.
-![Workshop Studio Landing Page](/images/aws-ws-event1.png)
+![Workshop Studio Landing Page](/static/images/aws-ws-event1.png)
3. On the next, **Sign in** page, choose **Email One-Time Passcode (OTP)** to sign in to your workshop page.
-![Sign in page](/images/aws-ws-event2.png)
+![Sign in page](/static/images/aws-ws-event2.png)
4. Provide an email address to receive a one-time passcode.
-![Email address input](/images/aws-ws-event3.png)
+![Email address input](/static/images/aws-ws-event3.png)
5. Enter the passcode that you received in the provided email address, and click **Sign in**.
6. Next, in the textbox, enter the event access code (eg: abcd-012345-ef) that you received from the event facilitators. If you are provided a one-click join link, you will be redirected to the next step automatically.
-![Event access code](/images/aws-ws-event4.png)
+![Event access code](/static/images/aws-ws-event4.png)
7. Select on **I agree with the Terms and Conditions** on the bottom of the next page and click **Join event** to continue to the event dashboard.
8. On the event dashboard, click on **Open AWS console** to federate into AWS Management Console in a new tab. On the same page, click **Get started** to open the workshop instructions.
-![Event dashboard](/images/aws-ws-event5.png)
+![Event dashboard](/static/images/aws-ws-event5.png)
-Now that you are set up, continue on to: [Exercise 1: Overview]({{< ref "event-driven-architecture/ex1overview" >}}).
+Now that you are set up, continue on to: :link[Exercise 1: Overview]{href="/event-driven-architecture/ex1overview"}.
diff --git a/content/event-driven-architecture/setup/index.en.md b/content/event-driven-architecture/setup/index.en.md
new file mode 100644
index 00000000..8cc2ad46
--- /dev/null
+++ b/content/event-driven-architecture/setup/index.en.md
@@ -0,0 +1,16 @@
+---
+title: "Start here: Getting Started"
+date: 2019-12-02T07:05:12-08:00
+weight: 1
+chapter: true
+description: "Setup the lab environment."
+---
+
+## GETTING STARTED
+
+To set up this workshop, choose one of the following links, depending on whether you are:
+- :link[…running the workshop on your own (in your own account)]{href="/event-driven-architecture/setup/user-account/"}, or
+- :link[...attending an AWS-hosted event (using AWS-provided access codes)]{href="/event-driven-architecture/setup/aws-ws-event/"}
+
+Once you have completed either setup, continue on to:
+- :link[Exercise 1: Overview]{href="/event-driven-architecture/ex1overview"}
diff --git a/content/event-driven-architecture/setup/start-here/user-account.md b/content/event-driven-architecture/setup/start-here/user-account.md
deleted file mode 100644
index 6fae8a57..00000000
--- a/content/event-driven-architecture/setup/start-here/user-account.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: "On your own"
-date: 2019-12-02T07:05:12-08:00
-weight: 5
-chapter: true
-pre: "Start: "
----
-
-
-{{% notice info %}}
-Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re:Invent, Immersion Day, etc), go to [At an AWS hosted Event]({{< ref "event-driven-architecture/setup/start-here/aws-ws-event">}})
-{{% /notice %}}
-## Launch the CloudFormation stack
-{{% notice warning %}}
-During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.
-{{% /notice %}}
-
-1. Launch the CloudFormation template in US East 1 to deploy the resources in your account:
-
- *Optionally, download [the YAML template]({{% siteparam "event_driven_architecture_lab_yaml" %}}) and launch it your own way*
-
-1. Click *Next* on the first dialog.
-
-1. Scroll to the bottom and click *Next*, and then review the *Template*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
-![CloudFormation parameters](/images/awsconsole2.png)
- The stack will create DynamoDB tables, Lambda functions, Kinesis streams, and IAM roles and policies which will be used later on in the lab.
-
-1. After the CloudFormation stack is `CREATE_COMPLETE`, [continue onto the overview]({{< ref "event-driven-architecture/ex1overview" >}}).
diff --git a/content/event-driven-architecture/setup/user-account.en.md b/content/event-driven-architecture/setup/user-account.en.md
new file mode 100644
index 00000000..13a3e2bd
--- /dev/null
+++ b/content/event-driven-architecture/setup/user-account.en.md
@@ -0,0 +1,25 @@
+---
+title: "Start: On your own"
+date: 2019-12-02T07:05:12-08:00
+weight: 5
+chapter: true
+---
+
+
+::alert[Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re\:Invent, Immersion Day, etc), go to :link[At an AWS hosted Event]{href="/event-driven-architecture/setup/aws-ws-event"}]
+## Launch the CloudFormation stack
+::alert[During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.]{type="warning"}
+
+1. Launch the CloudFormation template in US West 2 to deploy the resources in your account: [![CloudFormation](/static/images/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/new?stackName=amazon-dynamodb-labs&templateURL=:param{key="event_driven_architecture_lab_yaml"})
+ 1. *Optionally, download [the YAML template](:param{key="event_driven_architecture_lab_yaml"}) and launch it your own way*
+
+1. Click *Next* on the first dialog.
+
+1. Scroll to the bottom and click *Next*, and then review the *Template*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
+![CloudFormation parameters](/static/images/awsconsole2.png)
+ The stack will create DynamoDB tables, Lambda functions, Kinesis streams, and IAM roles and policies which will be used later on in the lab.
+
+1. After the CloudFormation stack is `CREATE_COMPLETE`, :link[continue onto the overview]{href="/event-driven-architecture/ex1overview"}.
+
+
+
diff --git a/content/event-driven-architecture/test.txt b/content/event-driven-architecture/test.txt
deleted file mode 100644
index e69de29b..00000000
diff --git a/content/game-player-data/core-usage/Step1.en.md b/content/game-player-data/core-usage/Step1.en.md
index 33c539e9..2664afad 100644
--- a/content/game-player-data/core-usage/Step1.en.md
+++ b/content/game-player-data/core-usage/Step1.en.md
@@ -4,7 +4,6 @@ menuTitle: "Design the primary key"
date: 2021-04-21T07:33:04-05:00
weight: 31
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/core-usage/Step2.en.md b/content/game-player-data/core-usage/Step2.en.md
index 5c113271..0b2fe05e 100644
--- a/content/game-player-data/core-usage/Step2.en.md
+++ b/content/game-player-data/core-usage/Step2.en.md
@@ -4,7 +4,6 @@ menuTitle: "Create the table"
date: 2021-04-21T07:33:04-05:00
weight: 32
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
@@ -50,9 +49,7 @@ except Exception as e:
print(e)
```
-{{% notice info %}}
-Edit **scripts/create_table.py**, set both `ReadCapacityUnits` and `WriteCapacityUnits` to **100** for *battle-royale* table.
-{{% /notice %}}
+::alert[Edit **scripts/create_table.py**, set both `ReadCapacityUnits` and `WriteCapacityUnits` to **100** for *battle-royale* table.]
The preceding script uses the [CreateTable](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html) operation using [Boto 3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html), the AWS SDK for Python. The operation declares two attribute definitions, which are typed attributes to be used in the primary key. Though DynamoDB is [schemaless](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/SQLtoNoSQL.CreateTable.html), you must declare the names and types of attributes that are used for primary keys. The attributes must be included on every item that is written to the table and thus must be specified as you are creating a table.
diff --git a/content/game-player-data/core-usage/Step3.en.md b/content/game-player-data/core-usage/Step3.en.md
index 78b7d9ba..57afa842 100644
--- a/content/game-player-data/core-usage/Step3.en.md
+++ b/content/game-player-data/core-usage/Step3.en.md
@@ -4,7 +4,6 @@ menuTitle: "Bulk-load data"
date: 2021-04-21T07:33:04-05:00
weight: 33
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
@@ -66,6 +65,6 @@ You should see a `Count` of 835, indicating that all of your items were loaded s
You can also browse the table by navigating to **Services** -> **Database** -> **DynamoDB** in the AWS console.
-![BaseTableConsole](/images/game-player-data/core-usage/basetable-consolev2.png)
+![BaseTableConsole](/static/images/game-player-data/core-usage/basetable-consolev2.png)
In the next step, you see how to retrieve multiple entity types in a single request, which can reduce the total network requests you make in your application and enhance application performance.
\ No newline at end of file
diff --git a/content/game-player-data/core-usage/Step4.en.md b/content/game-player-data/core-usage/Step4.en.md
index 3b9ebc0b..79791da0 100644
--- a/content/game-player-data/core-usage/Step4.en.md
+++ b/content/game-player-data/core-usage/Step4.en.md
@@ -4,7 +4,6 @@ menuTitle: "Retrieve Item collections"
date: 2021-04-21T07:33:04-05:00
weight: 34
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/core-usage/_index.en.md b/content/game-player-data/core-usage/index.en.md
similarity index 95%
rename from content/game-player-data/core-usage/_index.en.md
rename to content/game-player-data/core-usage/index.en.md
index 0afed102..9214e024 100644
--- a/content/game-player-data/core-usage/_index.en.md
+++ b/content/game-player-data/core-usage/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Core usage: user profiles and games"
-menuTitle: "Core usage: user profiles and games"
+title: "3. Core usage: user profiles and games"
date: 2021-04-21T07:33:04-05:00
weight: 30
chapter: true
-pre: "3. "
description: "In this module, you design the primary key for the game application’s table, create the table and perform some basic actions."
---
diff --git a/content/game-player-data/_index.en.md b/content/game-player-data/index.en.md
similarity index 88%
rename from content/game-player-data/_index.en.md
rename to content/game-player-data/index.en.md
index d48d4ffb..87b61ec1 100644
--- a/content/game-player-data/_index.en.md
+++ b/content/game-player-data/index.en.md
@@ -1,9 +1,8 @@
---
-title: "Modeling Game Player Data with Amazon DynamoDB"
+title: "LGME: Modeling Game Player Data with Amazon DynamoDB"
chapter: true
description: "300 level: Hands-on exercise with Cloud9, Python, and data modelling best practices."
-pre: "LGME: "
-weight: 1
+weight: 5
---
@@ -14,7 +13,7 @@ https://aws.amazon.com/dynamodb/gaming/
Here's what this workshop includes:
-{{% children depth="1" description="true" %}}
+::children{depth=1}
### Target audience
diff --git a/content/game-player-data/join-games/Step1.en.md b/content/game-player-data/join-games/Step1.en.md
index 390dcaa6..35fe450e 100644
--- a/content/game-player-data/join-games/Step1.en.md
+++ b/content/game-player-data/join-games/Step1.en.md
@@ -4,7 +4,6 @@ menuTitle: "Add users to a game"
date: 2021-04-21T07:33:04-05:00
weight: 51
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/join-games/Step2.en.md b/content/game-player-data/join-games/Step2.en.md
index 7fbb6787..61f3c4ef 100644
--- a/content/game-player-data/join-games/Step2.en.md
+++ b/content/game-player-data/join-games/Step2.en.md
@@ -4,7 +4,6 @@ menuTitle: "Start a game"
date: 2021-04-21T07:33:04-05:00
weight: 52
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/join-games/_index.en.md b/content/game-player-data/join-games/index.en.md
similarity index 96%
rename from content/game-player-data/join-games/_index.en.md
rename to content/game-player-data/join-games/index.en.md
index 3256874f..179b46d0 100644
--- a/content/game-player-data/join-games/_index.en.md
+++ b/content/game-player-data/join-games/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Join and close games"
-menuTitle: "Join and close games"
+title: "5. Join and close games"
date: 2021-04-21T07:33:04-05:00
weight: 50
chapter: true
-pre: "5. "
description: "In this module, you learn about DynamoDB transactions and you use a DynamoDB transaction when adding new users to a game while preventing the game from becoming overloaded."
---
diff --git a/content/game-player-data/open-games/Step1.en.md b/content/game-player-data/open-games/Step1.en.md
index 8a6c2aff..6effe183 100644
--- a/content/game-player-data/open-games/Step1.en.md
+++ b/content/game-player-data/open-games/Step1.en.md
@@ -4,7 +4,6 @@ menuTitle: "Model a sparse GSI"
date: 2021-04-21T07:33:04-05:00
weight: 41
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/open-games/Step2.en.md b/content/game-player-data/open-games/Step2.en.md
index f51464bb..c3d1f1ff 100644
--- a/content/game-player-data/open-games/Step2.en.md
+++ b/content/game-player-data/open-games/Step2.en.md
@@ -4,7 +4,6 @@ menuTitle: "Create a sparse GSI"
date: 2021-04-21T07:33:04-05:00
weight: 42
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
@@ -60,9 +59,7 @@ except Exception as e:
print(e)
```
-{{% notice info %}}
-Edit **scripts/add_secondary_index.py**, set both `ReadCapacityUnits` and `WriteCapacityUnits` to **100** for `OpenGamesIndex`.
-{{% /notice %}}
+::alert[Edit **scripts/add_secondary_index.py**, set both `ReadCapacityUnits` and `WriteCapacityUnits` to **100** for `OpenGamesIndex`.]
Whenever attributes are used in a primary key for a table or secondary index, they must be defined in `AttributeDefinitions`. Then, you `Create` a new GSI in the `GlobalSecondaryIndexUpdates` property. For this GSI, you specify the index name, the schema of the primary key, the provisioned throughput, and the attributes you want to project.
diff --git a/content/game-player-data/open-games/Step3.en.md b/content/game-player-data/open-games/Step3.en.md
index 1104c506..e48032e8 100644
--- a/content/game-player-data/open-games/Step3.en.md
+++ b/content/game-player-data/open-games/Step3.en.md
@@ -4,7 +4,6 @@ menuTitle: "Query the sparse GSI"
date: 2021-04-21T07:33:04-05:00
weight: 43
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/open-games/Step4.en.md b/content/game-player-data/open-games/Step4.en.md
index f3acc2d6..354bc780 100644
--- a/content/game-player-data/open-games/Step4.en.md
+++ b/content/game-player-data/open-games/Step4.en.md
@@ -4,7 +4,6 @@ menuTitle: "Scan the sparse GSI"
date: 2021-04-21T07:33:04-05:00
weight: 44
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
@@ -62,7 +61,7 @@ Game<0ab37cf1-fc60-4d93-b72b-89335f759581 --Green Grasslands>
Additionally, using [PartiQL](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.html), you can run SQL-compatible query language to retrieve items from the table and its indexes in DynamoDB. You can navigate to PartiQL editor under **Services**, **Database**, **DynamoDB** in the AWS console, and run a `Scan` to receive a similar result.
-![PartiQLConsole](/images/game-player-data/open-games/partiql-consolev2.png)
+![PartiQLConsole](/static/images/game-player-data/open-games/partiql-consolev2.png)
In this step, you saw how using the `Scan` operation can be the right choice in specific circumstances. You used `Scan` to grab an assortment of entities from the sparse global secondary index (GSI) to show open games to players.
diff --git a/content/game-player-data/open-games/_index.en.md b/content/game-player-data/open-games/index.en.md
similarity index 96%
rename from content/game-player-data/open-games/_index.en.md
rename to content/game-player-data/open-games/index.en.md
index 552d4d82..2c21673d 100644
--- a/content/game-player-data/open-games/_index.en.md
+++ b/content/game-player-data/open-games/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Find open games"
-menuTitle: "Find open games"
+title: "4. Find open games"
date: 2021-04-21T07:33:04-05:00
weight: 40
chapter: true
-pre: "4. "
description: "In this module, you learn about using Global Secondary Indexes (GSIs) as a sparse index and use it to find open games."
---
diff --git a/content/game-player-data/past-games/Step1.en.md b/content/game-player-data/past-games/Step1.en.md
index ea804f5f..8edb407c 100644
--- a/content/game-player-data/past-games/Step1.en.md
+++ b/content/game-player-data/past-games/Step1.en.md
@@ -4,7 +4,6 @@ menuTitle: "Add an inverted index"
date: 2021-04-21T07:33:04-05:00
weight: 61
chapter: false
-pre: ""
description: "In this step, you add an inverted index to the table. An inverted index is created like any other secondary index."
---
@@ -62,9 +61,7 @@ except Exception as e:
print(e)
```
-{{% notice info %}}
-Edit **scripts/add_inverted_index.py**, set both `ReadCapacityUnits` and `WriteCapacityUnits` to **100** for `InvertedIndex`.
-{{% /notice %}}
+::alert[Edit **scripts/add_inverted_index.py**, set both `ReadCapacityUnits` and `WriteCapacityUnits` to **100** for `InvertedIndex`.]
In this script, you call an `update_table()` method on the DynamoDB client. In the method, you pass details about the secondary index you want to create, including the key schema for the index, the provisioned throughput, and the attributes to project into the index.
diff --git a/content/game-player-data/past-games/Step2.en.md b/content/game-player-data/past-games/Step2.en.md
index 6239036a..5039bd9c 100644
--- a/content/game-player-data/past-games/Step2.en.md
+++ b/content/game-player-data/past-games/Step2.en.md
@@ -4,7 +4,6 @@ menuTitle: "Retrieve games for a user"
date: 2021-04-21T07:33:04-05:00
weight: 62
chapter: false
-pre: ""
description: "In this step, you add an inverted index to the table. An inverted index is created like any other secondary index."
---
diff --git a/content/game-player-data/past-games/_index.en.md b/content/game-player-data/past-games/index.en.md
similarity index 97%
rename from content/game-player-data/past-games/_index.en.md
rename to content/game-player-data/past-games/index.en.md
index 056f2116..fcd7fd78 100644
--- a/content/game-player-data/past-games/_index.en.md
+++ b/content/game-player-data/past-games/index.en.md
@@ -1,10 +1,9 @@
---
-title: "View past games"
+title: "6. View past games"
menuTitle: "View past games"
date: 2021-04-21T07:33:04-05:00
weight: 60
chapter: true
-pre: "6. "
description: "In this module, you handle the final access pattern — find all past games for a user. Users in the application might want to view games they’ve played to watch replays, or they might want to view their friend's games."
---
diff --git a/content/game-player-data/plan-model/Step1.en.md b/content/game-player-data/plan-model/Step1.en.md
index 0c7e8a00..71241cb1 100644
--- a/content/game-player-data/plan-model/Step1.en.md
+++ b/content/game-player-data/plan-model/Step1.en.md
@@ -4,7 +4,6 @@ menuTitle: "Best Practices"
date: 2021-04-21T07:33:04-05:00
weight: 21
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
diff --git a/content/game-player-data/plan-model/Step2.en.md b/content/game-player-data/plan-model/Step2.en.md
index d41b45f7..de386fa2 100644
--- a/content/game-player-data/plan-model/Step2.en.md
+++ b/content/game-player-data/plan-model/Step2.en.md
@@ -4,7 +4,6 @@ menuTitle: "ER diagram (ERD)"
date: 2021-04-21T07:33:04-05:00
weight: 22
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
@@ -26,6 +25,6 @@ Thus, there is a many-to-many relationship between `Users` and `Games`. You can
With these entities and relationships in mind, the entity-relationship diagram is shown below.
-![ERD](/images/game-player-data/plan-model/erd.png)
+![ERD](/static/images/game-player-data/plan-model/erd.png)
Next, we will take a look at the access patterns the data model needs to support.
\ No newline at end of file
diff --git a/content/game-player-data/plan-model/Step3.en.md b/content/game-player-data/plan-model/Step3.en.md
index a6c5207a..f2af5496 100644
--- a/content/game-player-data/plan-model/Step3.en.md
+++ b/content/game-player-data/plan-model/Step3.en.md
@@ -4,7 +4,6 @@ menuTitle: "Review Access Patterns"
date: 2021-04-21T07:33:04-05:00
weight: 23
chapter: false
-pre: ""
description: "In this module, we look at different access patterns we need to model the data for."
---
diff --git a/content/game-player-data/plan-model/_index.en.md b/content/game-player-data/plan-model/index.en.md
similarity index 94%
rename from content/game-player-data/plan-model/_index.en.md
rename to content/game-player-data/plan-model/index.en.md
index ab79584d..b300d5dd 100644
--- a/content/game-player-data/plan-model/_index.en.md
+++ b/content/game-player-data/plan-model/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Plan your data model"
-menuTitle: "Plan your data model"
+title: "2. Plan your data model"
date: 2021-04-21T07:33:04-05:00
weight: 20
chapter: true
-pre: "2. "
description: "In this module, you learn about DynamoDB data modelling best practices and review application access patterns."
---
diff --git a/content/game-player-data/setup/Step1.en.md b/content/game-player-data/setup/Step1.en.md
index 1c992584..931a827f 100644
--- a/content/game-player-data/setup/Step1.en.md
+++ b/content/game-player-data/setup/Step1.en.md
@@ -4,7 +4,6 @@ menuTitle: "Obtain & Review Code"
date: 2021-04-21T07:33:04-05:00
weight: 14
chapter: false
-pre: ""
description: "To get started, you configure your environment and download code that you use during the lab."
---
@@ -21,7 +20,7 @@ description: "To get started, you configure your environment and download code t
You should now see your AWS Cloud9 environment. You need to be familiar with the three areas of the AWS Cloud9 console shown in the following screenshot:
-![Cloud9 Environment](/images/game-player-data/setup/cloud9-environment.png)
+![Cloud9 Environment](/static/images/game-player-data/setup/cloud9-environment.png)
- **File explorer**: On the left side of the IDE, the file explorer shows a list of the files in your directory.
diff --git a/content/game-player-data/setup/aws-ws-event.md b/content/game-player-data/setup/aws-ws-event.en.md
similarity index 74%
rename from content/game-player-data/setup/aws-ws-event.md
rename to content/game-player-data/setup/aws-ws-event.en.md
index 665caa0c..0e36c8ec 100644
--- a/content/game-player-data/setup/aws-ws-event.md
+++ b/content/game-player-data/setup/aws-ws-event.en.md
@@ -1,9 +1,8 @@
---
-title: "At an AWS Hosted Event"
+title: "Start: At an AWS Hosted Event"
date: 2019-12-02T07:05:12-08:00
weight: 11
chapter: true
-pre: "Start: "
---
### Login to AWS Workshop Studio Portal
@@ -11,22 +10,22 @@ pre: "Start: "
1. If you are provided a one-click join link, skip to step 3.
2. Visit [https://catalog.us-east-1.prod.workshops.aws](https://catalog.us-east-1.prod.workshops.aws). If you attended any other workshop earlier on this portal, please logout first. Click on **Get Started** on the right hand side of the window.
-![Workshop Studio Landing Page](/images/aws-ws-event1.png)
+![Workshop Studio Landing Page](/static/images/aws-ws-event1.png)
3. On the next, **Sign in** page, choose **Email One-Time Passcode (OTP)** to sign in to your workshop page.
-![Sign in page](/images/aws-ws-event2.png)
+![Sign in page](/static/images/aws-ws-event2.png)
4. Provide an email address to receive a one-time passcode.
-![Email address input](/images/aws-ws-event3.png)
+![Email address input](/static/images/aws-ws-event3.png)
5. Enter the passcode that you received in the provided email address, and click **Sign in**.
6. Next, in the textbox, enter the event access code (eg: abcd-012345-ef) that you received from the event facilitators. If you are provided a one-click join link, you will be redirected to the next step automatically.
-![Event access code](/images/aws-ws-event4.png)
+![Event access code](/static/images/aws-ws-event4.png)
7. Select on **I agree with the Terms and Conditions** on the bottom of the next page and click **Join event** to continue to the event dashboard.
8. On the event dashboard, click on **Open AWS console** to federate into AWS Management Console in a new tab. On the same page, click **Get started** to open the workshop instructions.
-![Event dashboard](/images/aws-ws-event5.png)
+![Event dashboard](/static/images/aws-ws-event5.png)
-9. Now that you are connected continue on to: [Step 1]({{< ref "game-player-data/setup/Step1" >}}).
+9. Now that you are connected continue on to: :link[Step 1]{href="/game-player-data/setup/Step1"}.
diff --git a/content/game-player-data/setup/_index.en.md b/content/game-player-data/setup/index.en.md
similarity index 60%
rename from content/game-player-data/setup/_index.en.md
rename to content/game-player-data/setup/index.en.md
index df0be672..4b9de969 100644
--- a/content/game-player-data/setup/_index.en.md
+++ b/content/game-player-data/setup/index.en.md
@@ -1,10 +1,9 @@
---
-title: "Getting Started"
+title: "1. Getting Started"
menuTitle: "Getting Started"
date: 2021-04-21T07:33:04-05:00
weight: 10
chapter: true
-pre: "1. "
description: "In this module, you configure your environment and download code that you will use throughout the lab."
---
@@ -22,14 +21,12 @@ In this module, you'll configure your environment and download code that you wil
To set up this workshop, choose one of the following paths, depending on whether you are:
-{{% notice warning %}}
-If following the lab in your own AWS Account, you will create DynamoDB tables that will incur a cost that could approach tens of dollars per day. **Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you [delete the Cloud9 environment](https://docs.aws.amazon.com/cloud9/latest/user-guide/delete-environment.html) as soon as the lab is complete**.
-{{% /notice %}}
+::alert[If following the lab in your own AWS Account, you will create DynamoDB tables that will incur a cost that could approach tens of dollars per day. **Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you [delete the Cloud9 environment](https://docs.aws.amazon.com/cloud9/latest/user-guide/delete-environment.html) as soon as the lab is complete**.]{type="warning"}
-- […running the workshop on your own (in your own AWS account)]({{< ref "/game-player-data/setup/on-your-own" >}}), which guides you to launch a Cloud9 environment using CloudFormation
+- :link[…running the workshop on your own (in your own AWS account)]{href="/game-player-data/setup/on-your-own"}, which guides you to launch a Cloud9 environment using CloudFormation
-- […attending an AWS-hosted event (using AWS-provided access-code)]({{< ref "/game-player-data/setup/aws-ws-event" >}})
+- :link[…attending an AWS-hosted event (using AWS-provided access-code)]{href="/game-player-data/setup/aws-ws-event"}
Once you have completed with either setup, continue on to:
-- [Step 1: Setup AWS Cloud9 IDE]({{< ref "/game-player-data/setup/step1" >}})
+- :link[Step 1: Setup AWS Cloud9 IDE]{href="/game-player-data/setup/step1"}
diff --git a/content/game-player-data/setup/on-your-own.en.md b/content/game-player-data/setup/on-your-own.en.md
new file mode 100644
index 00000000..18638773
--- /dev/null
+++ b/content/game-player-data/setup/on-your-own.en.md
@@ -0,0 +1,29 @@
+---
+title: "Start: On your own"
+date: 2019-12-02T07:05:12-08:00
+weight: 5
+chapter: true
+---
+
+::alert[These setup instructions are identical for LADV, LHOL, and LGME - all of which use the same Cloud9 template. Only complete this section once, and only if you're running it on your own account.]{type="warning"}
+
+::alert[Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re\:Invent, Immersion Day, etc), go to :link[At an AWS hosted Event]{href="/game-player-data/setup/aws-ws-event"}]
+
+## Launch the CloudFormation stack
+::alert[During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.]
+
+1. Launch the CloudFormation template in US West 2 to deploy the resources in your account: [![CloudFormation](/static/images/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/new?stackName=amazon-dynamodb-labs&templateURL=:param{key="design_patterns_s3_lab_yaml"})
+ 1. *Optionally, download [the YAML template](:param{key="design_patterns_s3_lab_yaml"}) and launch it your own way*
+
+1. Click *Next* on the first dialog.
+
+1. In the Parameters section, note the *Timeout* is set to zero. This means the Cloud9 instance will not sleep; you may want to change this manually to a value such as 60 to protect against unexpected charges if you forget to delete the stack at the end.
+ Leave the *WorkshopZIP* parameter unchanged and click *Next*
+![CloudFormation parameters](/static/images/awsconsole1.png)
+
+1. Scroll to the bottom and click *Next*, and then review the *Template* and *Parameters*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
+![CloudFormation parameters](/static/images/awsconsole2.png)
+ The stack will create a Cloud9 lab instance, a role for the instance, and a role for the AWS Lambda function used later on in the lab. It will use Systems Manager to configure the Cloud9 instance.
+
+
+1. After the CloudFormation stack is `CREATE_COMPLETE`, :link[continue onto connecting to Cloud9]{href="/game-player-data/setup/Step1"}.
diff --git a/content/game-player-data/setup/on-your-own.md b/content/game-player-data/setup/on-your-own.md
deleted file mode 100644
index 3d4a219b..00000000
--- a/content/game-player-data/setup/on-your-own.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: "On your own Set up"
-menuTitle: "On your own"
-date: 2021-04-21T07:39:31-05:00
-weight: 12
-pre: "Start: "
----
-
-{{% notice warning %}}
-These setup instructions are identitical for LADV, LHOL, and LGME - all of which use the same Cloud9 template.
-Only complete this section once, and only if you're running it on your own account.
-{{% /notice %}}
-
-{{% notice info %}}
-Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re:Invent, Immersion Day, etc), go to [At an AWS hosted Event]({{< ref "game-player-data/setup/aws-ws-event">}})
-{{% /notice %}}
-
-## Launch the CloudFormation stack
-{{% notice warning %}}
-During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.
-{{% /notice %}}
-
-1. Launch the CloudFormation template in US East 1 to deploy the resources in your account:
-
- *Optionally, download [the YAML template]({{% siteparam "design_patterns_s3_lab_yaml" %}}) and launch it your own way*
-
-1. Click *Next* on the first dialog.
-
-1. In the Parameters section, note the *Timeout* is set to zero. This means the Cloud9 instance will not sleep; you may want to change this manually to a value such as 60 to protect against unexpected charges if you forget to delete the stack at the end.
- Leave the *WorkshopZIP* parameter unchanged and click *Next*
-![CloudFormation parameters](/images/awsconsole1.png)
-
-1. Scroll to the bottom and click *Next*, and then review the *Template* and *Parameters*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
-![CloudFormation parameters](/images/awsconsole2.png)
- The stack will create a Cloud9 lab instance, a role for the instance, and a role for the AWS Lambda function used later on in the lab. It will use Systems Manager to configure the Cloud9 instance.
-
-
-1. After the CloudFormation stack is `CREATE_COMPLETE`, [continue onto connecting to Cloud9]({{< ref "game-player-data/setup/Step1" >}}).
diff --git a/content/game-player-data/summary.en.md b/content/game-player-data/summary.en.md
index 08ffef39..a1e48d3e 100644
--- a/content/game-player-data/summary.en.md
+++ b/content/game-player-data/summary.en.md
@@ -4,7 +4,6 @@ menuTitle: "Summary & Cleanup"
date: 2021-04-21T07:33:04-05:00
weight: 70
chapter: true
-pre: ""
description: "Final words"
---
@@ -38,6 +37,4 @@ Please take a few moments to share your feedback with us using the link that you
 If you were running this lab in your own AWS Account (not an AWS run event), don't forget to cleanup the resources, by deleting the CloudFormation stack or the resources themselves (in case of no CloudFormation stack) you used during setup.
-{{% notice warning %}}
-If following the lab in your own AWS Account, you will create DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. **Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you [delete the Cloud9 environment](https://docs.aws.amazon.com/cloud9/latest/user-guide/delete-environment.html) as soon as the lab is complete**.
-{{% /notice %}}
\ No newline at end of file
+::alert[If following the lab in your own AWS Account, you will create DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. **Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you [delete the Cloud9 environment](https://docs.aws.amazon.com/cloud9/latest/user-guide/delete-environment.html) as soon as the lab is complete**.]{type="warning"}
\ No newline at end of file
diff --git a/content/hands-on-labs/backups/aws-backup-recap.md b/content/hands-on-labs/backups/aws-backup-recap.en.md
similarity index 100%
rename from content/hands-on-labs/backups/aws-backup-recap.md
rename to content/hands-on-labs/backups/aws-backup-recap.en.md
diff --git a/content/hands-on-labs/backups/clean-up.md b/content/hands-on-labs/backups/clean-up.en.md
similarity index 79%
rename from content/hands-on-labs/backups/clean-up.md
rename to content/hands-on-labs/backups/clean-up.en.md
index 2dc409e5..28e34025 100644
--- a/content/hands-on-labs/backups/clean-up.md
+++ b/content/hands-on-labs/backups/clean-up.en.md
@@ -37,19 +37,19 @@ Follow these steps to delete a backup plan:
1. In the AWS Management Console, navigate to **Services -> AWS Backup.**
In the navigation pane, choose Backup plans.On the Backup plans page, choose *dbBackupPlan*. This takes you to the details page.
-![Backup Plan Delete 1](/images/hands-on-labs/backup/backup_plan_delete_1.png)
+![Backup Plan Delete 1](/static/images/hands-on-labs/backup/backup_plan_delete_1.png)
2. To delete the resource assignments for your plan, choose the radio button next to the dynamodbTable under **Resource assignments**, and then choose **Delete**.
-![Backup Plan Delete 2](/images/hands-on-labs/backup/backup_plan_delete_2.png)
+![Backup Plan Delete 2](/static/images/hands-on-labs/backup/backup_plan_delete_2.png)
3. To delete the backup plan, choose **Delete** in the upper-right corner of the page.
-![Backup Plan Delete 3](/images/hands-on-labs/backup/backup_plan_delete_3.png)
+![Backup Plan Delete 3](/static/images/hands-on-labs/backup/backup_plan_delete_3.png)
4. On the confirmation page, enter *dbBackupPlan*, and choose **Delete plan**.
-![Backup Plan Delete 4](/images/hands-on-labs/backup/backup_plan_delete_4.png)
+![Backup Plan Delete 4](/static/images/hands-on-labs/backup/backup_plan_delete_4.png)
### Step 3: Delete the recovery points
@@ -59,7 +59,7 @@ In the navigation pane, choose Backup plans.On the Backup plans page, choose *db
2. On the Backup vaults page, choose the *dynamodb-backup-vault*. Check the recovery point and choose **Delete**.
-![Restore Points Delete 1](/images/hands-on-labs/backup/restore_point_delete_1.png)
+![Restore Points Delete 1](/static/images/hands-on-labs/backup/restore_point_delete_1.png)
3. If you are deleting more than one recovery point, follow these steps:
@@ -79,8 +79,8 @@ Prematurely closing this tab will end the deletion process and might leave behin
### Step 4: Delete the backup vault
1. Select the backup vault *dynamodb-backup-vault* and choose **Delete**.
-![Backup Vault Delete 1](/images/hands-on-labs/backup/backup_vault_delete_1.png)
+![Backup Vault Delete 1](/static/images/hands-on-labs/backup/backup_vault_delete_1.png)
2. On the confirmation page, enter *dynamodb-backup-vault*, and choose **Delete Backup vault**.
-![Backup Vault Delete 2](/images/hands-on-labs/backup/backup_vault_delete_2.png)
+![Backup Vault Delete 2](/static/images/hands-on-labs/backup/backup_vault_delete_2.png)
diff --git a/content/hands-on-labs/backups/_index.en.md b/content/hands-on-labs/backups/index.en.md
similarity index 97%
rename from content/hands-on-labs/backups/_index.en.md
rename to content/hands-on-labs/backups/index.en.md
index 27e57064..02c9a0f5 100644
--- a/content/hands-on-labs/backups/_index.en.md
+++ b/content/hands-on-labs/backups/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Backups"
-menuTitle: "Backups"
+title: "4. Backups"
date: 2021-04-25T07:33:04-05:00
weight: 40
chapter: true
-pre: "4. "
---
diff --git a/content/hands-on-labs/backups/on-demand-backup.md b/content/hands-on-labs/backups/on-demand-backup.en.md
similarity index 84%
rename from content/hands-on-labs/backups/on-demand-backup.md
rename to content/hands-on-labs/backups/on-demand-backup.en.md
index b4f4b3c9..e0cc999c 100644
--- a/content/hands-on-labs/backups/on-demand-backup.md
+++ b/content/hands-on-labs/backups/on-demand-backup.en.md
@@ -16,11 +16,11 @@ table performance or availability.
1. First, go to the [DynamoDB Console](https://console.aws.amazon.com/dynamodbv2/) and click on *Tables* from the side menu.Choose ProductCatalog table.
On the **Backups** tab of the ProductCatalog table, choose **Create backup**.
-![OD Backup 1](/images/hands-on-labs/backup/od_backup_1.png)
+![OD Backup 1](/static/images/hands-on-labs/backup/od_backup_1.png)
2. Make sure that ProductCatalog is the source table name. Choose **Customize settings** and then select **Backup with DynamoDB**. Enter the name `ProductCatalogBackup`. Click **Create backup** to create the backup.
-![OD Backup 2](/images/hands-on-labs/backup/od_backup_2.png)
+![OD Backup 2](/static/images/hands-on-labs/backup/od_backup_2.png)
While the backup is being created, the backup status is set to
**Creating**. After the backup is complete, the backup status changes to
@@ -32,7 +32,7 @@ While the backup is being created, the backup status is set to
Choose **Backups** tab. In the list of backups, choose ProductCatalogBackup.
Choose **Restore**.
-![OD Backup 3](/images/hands-on-labs/backup/od_backup_3.png)
+![OD Backup 3](/static/images/hands-on-labs/backup/od_backup_3.png)
2. Enter `ProductCatalogODRestore` as the new table name. Confirm the
backup name and other backup details. Choose **Restore**
@@ -41,7 +41,7 @@ While the backup is being created, the backup status is set to
finished, the status of the `ProductCatalogODRestore` table changes to
**Active**.
-![OD Backup 4](/images/hands-on-labs/backup/od_backup_4.png)
+![OD Backup 4](/static/images/hands-on-labs/backup/od_backup_4.png)
### To delete a backup
@@ -54,8 +54,8 @@ ProductCatalogBackup. You can only delete the backup after the table `ProductCat
1. In the list of backups, choose ProductCatalogBackup.
1. Click **Delete**:
-![OD Backup 5](/images/hands-on-labs/backup/od_backup_5.png)
+![OD Backup 5](/static/images/hands-on-labs/backup/od_backup_5.png)
 Finally, type the word `Delete` and click **Delete** to delete the backup.
-![OD Backup 6](/images/hands-on-labs/backup/od_backup_6.png)
+![OD Backup 6](/static/images/hands-on-labs/backup/od_backup_6.png)
diff --git a/content/hands-on-labs/backups/pitr-backup.md b/content/hands-on-labs/backups/pitr-backup.en.md
similarity index 83%
rename from content/hands-on-labs/backups/pitr-backup.md
rename to content/hands-on-labs/backups/pitr-backup.en.md
index 369d7410..d8cb2a31 100644
--- a/content/hands-on-labs/backups/pitr-backup.md
+++ b/content/hands-on-labs/backups/pitr-backup.en.md
@@ -20,17 +20,17 @@ default, PITR is disabled.
1. First, go to the [DynamoDB Console](https://console.aws.amazon.com/dynamodbv2/) and click on *Tables* from the side menu.
In the list of tables, choose the ProductCatalog table.On the **Backups** tab of the ProductCatalog table in the **Point-in-time recovery** section, choose **Edit**.
-![PITR Backup 1](/images/hands-on-labs/backup/pitr_backup_1.png)
+![PITR Backup 1](/static/images/hands-on-labs/backup/pitr_backup_1.png)
2. Select **Enable Point-in-time-recovery** and choose **Save changes**.
-![PITR Backup 2](/images/hands-on-labs/backup/pitr_backup_2.png)
+![PITR Backup 2](/static/images/hands-on-labs/backup/pitr_backup_2.png)
### To restore a table to a point in time
Now let us say we get some unwanted records in ProductCatalog table as highlighted below.
-![PITR Unwanted Records](/images/hands-on-labs/backup/pitr_unwanted_records.png)
+![PITR Unwanted Records](/static/images/hands-on-labs/backup/pitr_unwanted_records.png)
Follow the below steps to restore ProductCatalog using Point-in-time-recovery.
@@ -41,13 +41,13 @@ Follow the below steps to restore ProductCatalog using Point-in-time-recovery.
**Point-in-time recovery** section, choose **Restore to
point-in-time**.
-![PITR Restore 1](/images/hands-on-labs/backup/pitr_restore_1.png)
+![PITR Restore 1](/static/images/hands-on-labs/backup/pitr_restore_1.png)
2. For the new table name, enter ProductCatalogPITR. To confirm
the restorable time, set the **Restore date and time** to the **Latest
restore date**. Choose **Restore** to start the restore process.
-![PITR Restore 2](/images/hands-on-labs/backup/pitr_restore_2.png)
+![PITR Restore 2](/static/images/hands-on-labs/backup/pitr_restore_2.png)
*Note : You can restore the table to the same AWS Region or to a
different Region from where the backup resides. You can also exclude
@@ -58,4 +58,4 @@ The table that is being restored is shown with the status **Restoring**.
After the restore process is finished, the status of the
*ProductCatalogPITR* table changes to **Active**.
-![PITR Restore 3](/images/hands-on-labs/backup/pitr_restore_3.png)
+![PITR Restore 3](/static/images/hands-on-labs/backup/pitr_restore_3.png)
diff --git a/content/hands-on-labs/backups/restrict-backup-deletion.md b/content/hands-on-labs/backups/restrict-backup-deletion.en.md
similarity index 94%
rename from content/hands-on-labs/backups/restrict-backup-deletion.md
rename to content/hands-on-labs/backups/restrict-backup-deletion.en.md
index 8a09a335..fefcb20a 100644
--- a/content/hands-on-labs/backups/restrict-backup-deletion.md
+++ b/content/hands-on-labs/backups/restrict-backup-deletion.en.md
@@ -91,9 +91,9 @@ users belonging to this IAM group will inherit the permission.
Let’s say now the user tries to delete the backup in AWS backup.
-![Restrict Backup Deletion 1](/images/hands-on-labs/backup/restrict_delete_1.png)
+![Restrict Backup Deletion 1](/static/images/hands-on-labs/backup/restrict_delete_1.png)
User gets the access denied error due to insufficient permission to
delete the backup.
-![Restrict Backup Deletion 2](/images/hands-on-labs/backup/restrict_delete_2.png)
+![Restrict Backup Deletion 2](/static/images/hands-on-labs/backup/restrict_delete_2.png)
diff --git a/content/hands-on-labs/backups/schedule-backup.md b/content/hands-on-labs/backups/schedule-backup.en.md
similarity index 75%
rename from content/hands-on-labs/backups/schedule-backup.md
rename to content/hands-on-labs/backups/schedule-backup.en.md
index 00b621a2..a2fb50a0 100644
--- a/content/hands-on-labs/backups/schedule-backup.md
+++ b/content/hands-on-labs/backups/schedule-backup.en.md
@@ -12,29 +12,29 @@ starting a backup job.
1. In the AWS Management Console, navigate to **Services -> AWS
Backup.** Click on **Create Backup vault** under **Backup vaults**.
-![Scheduled Backup 1](/images/hands-on-labs/backup/sched_backup_1.png)
+![Scheduled Backup 1](/static/images/hands-on-labs/backup/sched_backup_1.png)
2. Provide Backup vault name of your choice. AWS KMS encryption master
key. By default, AWS Backup creates a master key with the alias
aws/backup for you. You can choose that key or choose any other key
in your account. Click on **Create Backup vault**
-![Scheduled Backup 2](/images/hands-on-labs/backup/sched_backup_2.png)
+![Scheduled Backup 2](/static/images/hands-on-labs/backup/sched_backup_2.png)
You can see Backup vault is created successfully
-![Scheduled Backup 3](/images/hands-on-labs/backup/sched_backup_3.png)
+![Scheduled Backup 3](/static/images/hands-on-labs/backup/sched_backup_3.png)
Now, we need to create backup plan.
3. Click on **Create Backup plan** under **Backup plans**.
-![Scheduled Backup 4](/images/hands-on-labs/backup/sched_backup_4.png)
+![Scheduled Backup 4](/static/images/hands-on-labs/backup/sched_backup_4.png)
4. Select **Build a new plan**. Provide **backup plan name** and **rule
name**.
-![Scheduled Backup 5](/images/hands-on-labs/backup/sched_backup_5.png)
+![Scheduled Backup 5](/static/images/hands-on-labs/backup/sched_backup_5.png)
5. Select **backup frequency.** The backup frequency determines how
often a backup is created. Using the console, you can choose a
@@ -48,13 +48,13 @@ Now, we need to create backup plan.
transitioned to cold storage and when it expires. I am configuring
backup to move cold storage after 31 days and expire after 366 days.
-![Scheduled Backup 6](/images/hands-on-labs/backup/sched_backup_6.png)
+![Scheduled Backup 6](/static/images/hands-on-labs/backup/sched_backup_6.png)
6. Select **backup vault** we created earlier. Click on **Create
plan**.
-![Scheduled Backup 7](/images/hands-on-labs/backup/sched_backup_7.png)
+![Scheduled Backup 7](/static/images/hands-on-labs/backup/sched_backup_7.png)
*Note: Backups that are transitioned to cold storage must be stored in
cold storage for a minimum of 90 days*
@@ -65,17 +65,17 @@ backup plan.
7. Give Resource a assignment name. Choose the default role. Select **Include specific resource types** under "1. Define resource selection"
-![Scheduled Backup 8](/images/hands-on-labs/backup/sched_backup_8.png)
+![Scheduled Backup 8](/static/images/hands-on-labs/backup/sched_backup_8.png)
8. Under "2. Select specific resource types" select the resource type **DynamoDB** in the drop down. Click choose resources, uncheck All, and select the **ProductCatalog** table. Click **Assign resources**
-![Scheduled Backup 9](/images/hands-on-labs/backup/sched_backup_9.png)
+![Scheduled Backup 9](/static/images/hands-on-labs/backup/sched_backup_9.png)
9. You can see the status of your backup job under jobs section after
your scheduled backup window timeframe. You can see your DynamoDB
backup is completed.
-![Scheduled Backup 10](/images/hands-on-labs/backup/sched_backup_10.png)
+![Scheduled Backup 10](/static/images/hands-on-labs/backup/sched_backup_10.png)
### Restore a Backup:
@@ -86,29 +86,29 @@ protected and is available to be restored using AWS Backup. In your account a ba
resources that are backed up in AWS Backup. Choose our DynamoDB
table resource.
-![Scheduled Backup 11](/images/hands-on-labs/backup/sched_backup_11.png)
+![Scheduled Backup 11](/static/images/hands-on-labs/backup/sched_backup_11.png)
2. Choose the recovery point ID of the resource. Click on **Restore**. _Note: If you do not see a recovery point, you can click "Create an on-demand backup" and complete the backup. For the purposes of this lab, you need a completed backup to continue, and you may not want to wait for your backup plan's scheduled backup._
-![Scheduled Backup 12](/images/hands-on-labs/backup/sched_backup_12.png)
+![Scheduled Backup 12](/static/images/hands-on-labs/backup/sched_backup_12.png)
3. Provide new DynamoDB table name. Leave all the settings on the defaults and click
**Restore backup**
-![Scheduled Backup 13](/images/hands-on-labs/backup/sched_backup_13.png)
+![Scheduled Backup 13](/static/images/hands-on-labs/backup/sched_backup_13.png)
The **Restore jobs** pane appears. A message at the top of the page
provides information about the restore job. You can see job status is
running.After some time you can see status changes to completed
-![Scheduled Backup 14](/images/hands-on-labs/backup/sched_backup_14.png)
+![Scheduled Backup 14](/static/images/hands-on-labs/backup/sched_backup_14.png)
You can also monitor the all backup and restore jobs in central
dashboard.
-![Scheduled Backup 15](/images/hands-on-labs/backup/sched_backup_15.png)
+![Scheduled Backup 15](/static/images/hands-on-labs/backup/sched_backup_15.png)
To see the restored table, go to the [DynamoDB Console](https://console.aws.amazon.com/dynamodbv2/) and click on *Tables* from the side menu.Choose
*ProductCatalogRestored* table. You can see the table is restored along with data.
-![Scheduled Backup 16](/images/hands-on-labs/backup/sched_backup_16.png)
+![Scheduled Backup 16](/static/images/hands-on-labs/backup/sched_backup_16.png)
diff --git a/content/hands-on-labs/cleanup.en.md b/content/hands-on-labs/cleanup.en.md
index 60e573fd..94ff40e6 100644
--- a/content/hands-on-labs/cleanup.en.md
+++ b/content/hands-on-labs/cleanup.en.md
@@ -27,6 +27,6 @@ aws dynamodb delete-table \
* The Cloudformation template that was launched during the getting started section. Navigate to the Cloudformation console, select the `amazon-dynamodb-labs` stack and click `Delete`.
-![Cleanup Delete dynamodb-labs CFN Stack](/images/hands-on-labs/dynamodb-labs-cfn-delete-stack.png)
+![Cleanup Delete dynamodb-labs CFN Stack](/static/images/hands-on-labs/dynamodb-labs-cfn-delete-stack.png)
This should wrap up the cleanup process.
diff --git a/content/hands-on-labs/explore-cli/cli-gsi.en.md b/content/hands-on-labs/explore-cli/cli-gsi.en.md
index 91164708..fcfcf3bf 100644
--- a/content/hands-on-labs/explore-cli/cli-gsi.en.md
+++ b/content/hands-on-labs/explore-cli/cli-gsi.en.md
@@ -75,7 +75,7 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
1. Running a `query` on a GSI is no different than running it against a table, except we also need to specify which GSI to use with the *\-\-index\-name* option and we'll use the GSI key attributes in the KeyConditionExpression.
@@ -101,7 +101,7 @@ The `query` could not be more optimal than this. Even if the table has a billio
2. In the base table, the Primary Key uniquely identifies the row which means that a `get-item` request will match AT MOST one item. Since we can select any attributes as the Keys for a GSI, there is no guarantee that the keys of a GSI will uniquely identify a single item. Therefore, DynamoDB prevents you from executing a `get-item` against a GSI.
-{{% /expand%}}
+::::
### Cleanup
diff --git a/content/hands-on-labs/explore-cli/cli-read-item-collection.en.md b/content/hands-on-labs/explore-cli/cli-read-item-collection.en.md
index ae2f690a..17143b9b 100644
--- a/content/hands-on-labs/explore-cli/cli-read-item-collection.en.md
+++ b/content/hands-on-labs/explore-cli/cli-read-item-collection.en.md
@@ -80,7 +80,7 @@ Hint: consider the *max-items* and *scan-index-forward* options. The solution i
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
If we want to order items in ascending order of the sort key then we tell DynamoDB to scan the index moving forward using the *\-\-scan-index-forward* option. If we want to limit the number of items then we use the *\-\-max-items* option. This would be analogous in SQL to "ORDER BY ReplyDateTime ASC LIMIT 1".
```bash
@@ -108,4 +108,4 @@ aws dynamodb query \
--no-scan-index-forward \
--return-consumed-capacity TOTAL
```
-{{% /expand%}}
+::::
\ No newline at end of file
diff --git a/content/hands-on-labs/explore-cli/cli-scan.en.md b/content/hands-on-labs/explore-cli/cli-scan.en.md
index 753f8f72..a75b0602 100644
--- a/content/hands-on-labs/explore-cli/cli-scan.en.md
+++ b/content/hands-on-labs/explore-cli/cli-scan.en.md
@@ -71,7 +71,7 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
First we need to understand the structure of the data in the Forum Table so let's do a scan to see what attributes exist:
```bash
@@ -148,5 +148,4 @@ aws dynamodb scan \
--expression-attribute-names '{"#Views" : "Views"}' \
--return-consumed-capacity TOTAL
```
-
-{{% /expand%}}
+::::
diff --git a/content/hands-on-labs/explore-cli/cli-writing-data.en.md b/content/hands-on-labs/explore-cli/cli-writing-data.en.md
index a247a1b7..11a3051b 100644
--- a/content/hands-on-labs/explore-cli/cli-writing-data.en.md
+++ b/content/hands-on-labs/explore-cli/cli-writing-data.en.md
@@ -76,7 +76,7 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
First we need to see what the item looks like:
@@ -154,5 +154,4 @@ aws dynamodb update-item \
```
You can use the `get-item` command to verify that these changes were made after each step.
-
-{{% /expand%}}
+::::
diff --git a/content/hands-on-labs/explore-cli/_index.en.md b/content/hands-on-labs/explore-cli/index.en.md
similarity index 96%
rename from content/hands-on-labs/explore-cli/_index.en.md
rename to content/hands-on-labs/explore-cli/index.en.md
index e24adffc..bee8a755 100644
--- a/content/hands-on-labs/explore-cli/_index.en.md
+++ b/content/hands-on-labs/explore-cli/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Explore DynamoDB with the CLI"
-menuTitle: "Explore DynamoDB with the CLI"
+title: "2. Explore DynamoDB with the CLI"
date: 2021-04-21T07:33:04-05:00
weight: 20
chapter: true
-pre: "2. "
---
We will be exploring DynamoDB with the AWS CLI using the [AWS cloud9 management Console](https://console.aws.amazon.com/cloud9/home). If you haven't already, choose *open IDE* to launch AWS Cloud9 environment. You can close the Welcome screen and adjust your terminal to increase screen area, or close all the windows and navigate to *Window* -> *New Terminal* to open a new terminal window.
diff --git a/content/hands-on-labs/explore-console/console-gsi.en.md b/content/hands-on-labs/explore-console/console-gsi.en.md
index aa3a75b1..a7995342 100644
--- a/content/hands-on-labs/explore-console/console-gsi.en.md
+++ b/content/hands-on-labs/explore-console/console-gsi.en.md
@@ -14,7 +14,7 @@ Armed with this knowledge of GSIs, we can now create a GSI on the **Reply** tabl
Navigate to the **Reply** table, switch to the **Indexes** tab and click `Create Index`.
-![Console Create GSI 1](/images/hands-on-labs/explore-console/console_create_gsi_1.png)
+![Console Create GSI 1](/static/images/hands-on-labs/explore-console/console_create_gsi_1.png)
Enter `PostedBy` as the Partition key, `ReplyDateTime` as the Sort key, and `PostedBy-ReplyDateTime-gsi` as the Index name. Leave the other settings as defaults and click `Create Index`. Once the index leaves the `Creating` state you can continue on to the exercise below.
@@ -28,18 +28,17 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
1. Navigate to the Item Explorer for the **Reply** table. Running a Query on a GSI is no different than running it against a table, except we need to select the GSI we want to use for the Query and we'll use the GSI key attributes in the KeyConditionExpression.
Even if the table has a billion **Reply** items authored by other Users, this query will only cost us to read the exact 3 items we're hoping to return (unlike a `Scan`).
-![Console Create GSI 2](/images/hands-on-labs/explore-console/console_create_gsi_2.png)
-
-{{% /expand%}}
+![Console Create GSI 2](/static/images/hands-on-labs/explore-console/console_create_gsi_2.png)
+::::
### Cleanup
When you're done, make sure to remove the GSI. Return to the Indexes tab, select the `PostedBy-ReplyDateTime-gsi` index and click `Delete`.
-![Console Delete GSI](/images/hands-on-labs/explore-console/console_delete_gsi.png)
+![Console Delete GSI](/static/images/hands-on-labs/explore-console/console_delete_gsi.png)
diff --git a/content/hands-on-labs/explore-console/console-read-data.en.md b/content/hands-on-labs/explore-console/console-read-data.en.md
index 73b0245a..7c45699b 100644
--- a/content/hands-on-labs/explore-console/console-read-data.en.md
+++ b/content/hands-on-labs/explore-console/console-read-data.en.md
@@ -6,18 +6,18 @@ weight: 31
First, go to the [DynamoDB Console](https://console.aws.amazon.com/dynamodbv2/) and click on *Tables* from the side menu.
-![Console Pick Tables](/images/hands-on-labs/explore-console/dynamodb_pick_tables.png)
+![Console Pick Tables](/static/images/hands-on-labs/explore-console/dynamodb_pick_tables.png)
Next, choose the `ProductCatalog` table and click `Explore table items` on the top right to view the items.
-![Console ProductCatalog Items Preview](/images/hands-on-labs/explore-console/console_productcatalog_preview.png)
+![Console ProductCatalog Items Preview](/static/images/hands-on-labs/explore-console/console_productcatalog_preview.png)
We can see visually that the table has a Partition Key of *Id* (which is the `Number` type), no sort key, and there are 8 items in the table. Some items are Books and some items are Bicycles and some attributes like *Id*, *Price*, *ProductCategory*, and *Title* exist in every Item while other Category specific attributes like Authors or Colors exist only on some items.
Click on the *Id* attribute `101` to pull up the Item editor for that Item. We can see and modify all the attributes for this item right from the console. Try changing the *Title* to "Book 101 Title New and Improved". Click **Add new attribute** named *Reviewers* of the String set type and then click **Insert a field** twice to add a couple of entries to that set. When you're done click **Save changes**.
-![Console ProductCatalog Items Editor Forms](/images/hands-on-labs/explore-console/console_item_editor_forms.png)
+![Console ProductCatalog Items Editor Forms](/static/images/hands-on-labs/explore-console/console_item_editor_forms.png)
You can also use the Item editor in DynamoDB JSON notation (instead of the default Form based editor) by clicking **JSON** in the top right corner. This notation should look familiar if you already went through the [Explore the DynamoDB CLI](/hands-on-labs/explore-cli.html) portion of the lab. The DynamoDB JSON format is described in the [DynamoDB Low-Level API](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.LowLevelAPI.html) section of the Developer Guide.
-![Console ProductCatalog Items Editor JSON](/images/hands-on-labs/explore-console/console_item_editor_json.png)
+![Console ProductCatalog Items Editor JSON](/static/images/hands-on-labs/explore-console/console_item_editor_json.png)
diff --git a/content/hands-on-labs/explore-console/console-read-item-collection.en.md b/content/hands-on-labs/explore-console/console-read-item-collection.en.md
index c93a7861..1b5d6bf4 100644
--- a/content/hands-on-labs/explore-console/console-read-item-collection.en.md
+++ b/content/hands-on-labs/explore-console/console-read-item-collection.en.md
@@ -17,29 +17,29 @@ The Key Condition Expression will define the number of RRUs or RCUs that are con
We can optionally also specify a [Filter Expression](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.FilterExpression) for our Query. If we were comparing this to SQL, we would say "this is the part of the WHERE clause that acts on the non-Key attributes". Filter Expressions act to remove some items from the Result Set returned by the Query, **but they do not affect the consumed capacity of the Query**. If your Key Condition Expression matches 1,000,000 items and your FilterExpression reduces the result set down to 100 items, you will still be charged to read all 1,000,000 items. But the Filter Expression reduces the amount of data returned from the network connection so there is still a benefit to our application in using Filter Expressions even if it doesn't affect the price of the Query.
The ProductCatalog table we used in the previous examples only has a Partition Key so let's look at the data in the **Reply** table which has both a Partition Key and a Sort Key. Select the left menu bar **Explore items** under Tables.
-![Console Menu Item Explorer](/images/hands-on-labs/explore-console/console_menu_explore_item.png)
+![Console Menu Item Explorer](/static/images/hands-on-labs/explore-console/console_menu_explore_item.png)
You may need to click the hamburger menu icon to expand the left menu if it's hidden.
-![Console Menu Hamburger Icon](/images/hands-on-labs/explore-console/console_menu_hamburger_icon.png)
+![Console Menu Hamburger Icon](/static/images/hands-on-labs/explore-console/console_menu_hamburger_icon.png)
Once you enter the Explore Items you need to select the **Reply** table and then expand the Scan/Query items box.
-![Item Explorer Expand Tables](/images/hands-on-labs/explore-console/console_explore_item_select_table.png)
+![Item Explorer Expand Tables](/static/images/hands-on-labs/explore-console/console_explore_item_select_table.png)
Data in this table has an Id attribute which references items in the Thread table. Our data has two threads, and each thread has 2 replies. Let's use the *Query* functionality to read just the items from thread 1 by pasting `Amazon DynamoDB#DynamoDB Thread 1` into the *Id (Partition key)* box and then clicking **Run**.
We can see that there are two Reply items in the `DynamoDB Thread 1` thread.
-![Item Explorer Query Reply 1](/images/hands-on-labs/explore-console/console_item_explorer_query_reply_1.png)
+![Item Explorer Query Reply 1](/static/images/hands-on-labs/explore-console/console_item_explorer_query_reply_1.png)
Since the Sort Key in this table is a timestamp, we could specify a Key Condition Expression to return only the replies in a thread that were posted after a certain time by adding a sort key condition where `ReplyDateTime` is More than `2015-09-21` and clicking **Run**.
-![Item Explorer Query Reply 2](/images/hands-on-labs/explore-console/console_item_explorer_query_reply_2.png)
+![Item Explorer Query Reply 2](/static/images/hands-on-labs/explore-console/console_item_explorer_query_reply_2.png)
Remember we can use Filter Expressions if we want to limit our results based on non-key attributes. For example, we could find all the replies in Thread 1 that were posted by User B. Clear the sort key condition, and click **Add filter** then use `PostedBy` for the Attribute name, Condition `Equals` and Value `User B`, then click **Run**.
-![Item Explorer Query Reply 3](/images/hands-on-labs/explore-console/console_item_explorer_query_reply_3.png)
+![Item Explorer Query Reply 3](/static/images/hands-on-labs/explore-console/console_item_explorer_query_reply_3.png)
## Exercise
@@ -49,9 +49,9 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
If we want to order items in descending order of the sort key there are two ways to accomplish this in the console. We could either choose the **Sort descending** checkbox before running the query, or we could run the query and click the arrow next to the *ReplyDateTime* attribute name in the **Items returned** pane to change the sort order on that attribute.
-![Item Explorer Query Reply 4](/images/hands-on-labs/explore-console/console_item_explorer_query_reply_4.png)
+![Item Explorer Query Reply 4](/static/images/hands-on-labs/explore-console/console_item_explorer_query_reply_4.png)
+::::
-{{% /expand%}}
diff --git a/content/hands-on-labs/explore-console/console-scan.en.md b/content/hands-on-labs/explore-console/console-scan.en.md
index 61585e19..b885b4e6 100644
--- a/content/hands-on-labs/explore-console/console-scan.en.md
+++ b/content/hands-on-labs/explore-console/console-scan.en.md
@@ -9,18 +9,18 @@ The [Scan API](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/AP
The Scan API is similar to the Query API except that since we want to scan the whole table and not just a single Item Collection, there is no Key Condition Expression for a Scan. However, you can specify a [Filter Expression](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression) which will reduce the size of the result set (even though it will not reduce the amount of capacity consumed).
Let us look at the data in the **Reply** table which has both a Partition Key and a Sort Key. Select the left menu bar **Explore items**.
-![Console Menu Item Explorer](/images/hands-on-labs/explore-console/console_menu_explore_item.png)
+![Console Menu Item Explorer](/static/images/hands-on-labs/explore-console/console_menu_explore_item.png)
You may need to click the hamburger menu icon to expand the left menu if it's hidden.
-![Console Menu Hamburger Icon](/images/hands-on-labs/explore-console/console_menu_hamburger_icon.png)
+![Console Menu Hamburger Icon](/static/images/hands-on-labs/explore-console/console_menu_hamburger_icon.png)
Once you enter the Explore Items you need to select the **Reply** table and then expand the Scan/Query items box.
-![Item Explorer Expand Tables](/images/hands-on-labs/explore-console/console_explore_item_select_table.png)
+![Item Explorer Expand Tables](/static/images/hands-on-labs/explore-console/console_explore_item_select_table.png)
For example, we could find all the replies in the Reply that were posted by User A.
-![Item Explorer Scan Reply 1](/images/hands-on-labs/explore-console/console_item_explorer_scan_reply_1.png)
+![Item Explorer Scan Reply 1](/static/images/hands-on-labs/explore-console/console_item_explorer_scan_reply_1.png)
You should see 3 **Reply** items posted by User A.
@@ -32,9 +32,8 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
For this access pattern we will need to make a FilterCondition with clauses on both the *Threads* and *Views* attributes before clicking **Run**.
-![Item Explorer Scan Reply 2](/images/hands-on-labs/explore-console/console_item_explorer_scan_reply_2.png)
-
-{{% /expand%}}
+![Item Explorer Scan Reply 2](/static/images/hands-on-labs/explore-console/console_item_explorer_scan_reply_2.png)
+::::
diff --git a/content/hands-on-labs/explore-console/console-writing-data.en.md b/content/hands-on-labs/explore-console/console-writing-data.en.md
index 0af530fe..a9dbddc5 100644
--- a/content/hands-on-labs/explore-console/console-writing-data.en.md
+++ b/content/hands-on-labs/explore-console/console-writing-data.en.md
@@ -10,7 +10,7 @@ The DynamoDB [PutItem API](https://docs.aws.amazon.com/amazondynamodb/latest/API
Let's say we wanted to insert a new item into the *Reply* table from the console. First, navigate to the **Reply** table and click the **Create Item** button.
-![Console Create Item 1](/images/hands-on-labs/explore-console/console_create_item_1.png)
+![Console Create Item 1](/static/images/hands-on-labs/explore-console/console_create_item_1.png)
Click `JSON view`, ensure `View DynamoDB JSON` is deselected, paste the following JSON, and then click **Create Item** to insert the new item.
@@ -23,7 +23,7 @@ Click `JSON view`, ensure `View DynamoDB JSON` is deselected, paste the followin
}
```
-![Console Create Item 2](/images/hands-on-labs/explore-console/console_create_item_2.png)
+![Console Create Item 2](/static/images/hands-on-labs/explore-console/console_create_item_2.png)
## Updating or Deleting Data
@@ -33,7 +33,7 @@ The DynamoDB [DeleteItem API](https://docs.aws.amazon.com/amazondynamodb/latest/
You can easily modify or delete an item using the console by selecting the checkbox next to the item of interest, clicking the **Actions** dropdown and performing the desired action.
-![Console Delete Item](/images/hands-on-labs/explore-console/console_delete_item.png)
+![Console Delete Item](/static/images/hands-on-labs/explore-console/console_delete_item.png)
## Exercise
@@ -43,22 +43,21 @@ The solution is expandable below but try to figure it out yourself before moving
**Click below to expand and see the exercise solutions**
-{{%expand "Expand this to see the solution" %}}
+::::expand{header="Expand this to see the solution"}
Navigate to the **ProductCatalog** Table and click the `Id` 201 hyperlink to bring up the Item Editor for that item. Click the `+` icon next to the *Color* attribute to expand that List.
-![Console Modify Item 1](/images/hands-on-labs/explore-console/console_modify_item_1.png)
+![Console Modify Item 1](/static/images/hands-on-labs/explore-console/console_modify_item_1.png)
Click `Insert a field` and pick the `String` type. Do this twice. Add the colors "Silver" and "Green" to the list, then click `Save changes`.
-![Console Modify Item 2](/images/hands-on-labs/explore-console/console_modify_item_2.png)
+![Console Modify Item 2](/static/images/hands-on-labs/explore-console/console_modify_item_2.png)
We can see by expanding the Items Preview that those two list entries were added.
-![Console Modify Item 3](/images/hands-on-labs/explore-console/console_modify_item_3.png)
+![Console Modify Item 3](/static/images/hands-on-labs/explore-console/console_modify_item_3.png)
Return to the item editor for `Id` 201 and use the `Remove` button next to the Silver and Green list entries to remove them from the `Color` attribute, then click `Save changes`.
-![Console Modify Item 4](/images/hands-on-labs/explore-console/console_modify_item_4.png)
-
-{{% /expand%}}
+![Console Modify Item 4](/static/images/hands-on-labs/explore-console/console_modify_item_4.png)
+::::
diff --git a/content/hands-on-labs/explore-console/_index.en.md b/content/hands-on-labs/explore-console/index.en.md
similarity index 96%
rename from content/hands-on-labs/explore-console/_index.en.md
rename to content/hands-on-labs/explore-console/index.en.md
index bc90cec9..e358786f 100644
--- a/content/hands-on-labs/explore-console/_index.en.md
+++ b/content/hands-on-labs/explore-console/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Explore the DynamoDB Console"
-menuTitle: "Explore the DynamoDB Console"
+title: "3. Explore the DynamoDB Console"
date: 2021-04-25T07:33:04-05:00
weight: 30
chapter: true
-pre: "3. "
---
In this lab we will be exploring the [DynamoDB section of the AWS Management Console](https://console.aws.amazon.com/dynamodbv2/). There are two versions of the console and while you can always click "Revert to the current console" we will be working with V2 of the console.
diff --git a/content/hands-on-labs/_index.en.md b/content/hands-on-labs/index.en.md
similarity index 83%
rename from content/hands-on-labs/_index.en.md
rename to content/hands-on-labs/index.en.md
index 3ef992ec..e08df886 100644
--- a/content/hands-on-labs/_index.en.md
+++ b/content/hands-on-labs/index.en.md
@@ -1,15 +1,14 @@
---
-title: "Hands-on Labs for Amazon DynamoDB"
+title: "LHOL: Hands-on Labs for Amazon DynamoDB"
chapter: true
description: "200 level: Hands-on exercises with Cloud9 using the AWS CLI and Console."
-pre: "LHOL: "
weight: 1
---
In this workshop, you will learn to create and work with [Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html).
Here's what this workshop includes:
-{{% children depth="1" description="true" %}}
+::children{depth=1}
### Target audience
@@ -24,4 +23,4 @@ This workshop is designed for developers, engineers, and database administrators
### Recommended study before taking the lab
-If you're not part of an AWS event and you haven't recently reviewed DynamoDB design concepts, we suggest you watch this video on [Advanced Design Patterns for DynamoDB]({{% siteparam "latest_rh_design_pattern_yt" %}}), which is about an hour in duration.
+If you're not part of an AWS event and you haven't recently reviewed DynamoDB design concepts, we suggest you watch this video on [Advanced Design Patterns for DynamoDB](:param{key="latest_rh_design_pattern_yt"}), which is about an hour in duration.
diff --git a/content/hands-on-labs/rdbms-migration/_index.en.md b/content/hands-on-labs/rdbms-migration/index.en.md
similarity index 84%
rename from content/hands-on-labs/rdbms-migration/_index.en.md
rename to content/hands-on-labs/rdbms-migration/index.en.md
index 8aa3db40..26f1f9a0 100644
--- a/content/hands-on-labs/rdbms-migration/_index.en.md
+++ b/content/hands-on-labs/rdbms-migration/index.en.md
@@ -1,12 +1,10 @@
---
-title: "Relational Modeling & Migration"
-menuTitle: "Relational Modeling & Migration"
+title: "5. LMIG: Relational Modeling & Migration"
date: 2021-04-25T07:33:04-05:00
weight: 50
-pre: "5. "
---
-In this module, you will learn how to design a target data model in DynamoDB for highly normalized relational data in a relational database.
+In this module, also classified as LMIG, you will learn how to design a target data model in DynamoDB for highly normalized relational data in a relational database.
The exercise also guides a step by step migration of an IMDb dataset from a self-managed MySQL database instance on EC2 to a fully managed key-value pair database Amazon DynamoDB.
At the end of this lesson, you should feel confident in your ability to design and migrate an existing relational database to Amazon DynamoDB.
@@ -25,4 +23,4 @@ There are multiple factors that will influence our decisions in building the tar
We will briefly discuss the key aspects of creating a model that will serve various access patterns with ultra-low latency, low I/O, and low cost.
-![Final Deployment Architecture](/images/denormalization.png)
+![Final Deployment Architecture](/static/images/denormalization.png)
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter00.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter00.en.md
index b4150724..bd045617 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter00.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter00.en.md
@@ -18,4 +18,4 @@ CloudFormation DMS Instance Resources:
- DMS VPC: Migration VPC on in the N. Virginia region. This VPC will host DMS replication instance.
- Replication Instance: DMS Replication instance that will facilitate database migration from source MySQL server on EC2 to Amazon DynamoDB
-![Final Deployment Architecture](/images/migration-environment.png)
+![Final Deployment Architecture](/static/images/migration-environment.png)
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter02-1.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter02-1.en.md
index 9e86066a..fc2d5986 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter02-1.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter02-1.en.md
@@ -12,21 +12,18 @@ Let's create the DMS resources for the workshop.
3. Under “Add permissions” use the search box to find the “AmazonDMSVPCManagementRole” policy and select it, then click “Next”
5. Under “Name, review, and create” add the role name as dms-vpc-role and click Create role
-{{% notice note %}}
-_Do not continue unless you have made the IAM role._
-{{% /notice %}}
+::alert[_Do not continue unless you have made the IAM role._]
-1. Launch the CloudFormation template in US East 1 to deploy the resources in your account:
-
- *Optionally, download [the YAML template]({{% siteparam "lhol_migration_dms_setup_yaml" %}}) and launch it your own way in the [CloudFormation Console](https://console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/create/template)*
+1. Launch the CloudFormation template in US West 2 to deploy the resources in your account: [![CloudFormation](/static/images/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/new?stackName=amazon-dynamodb-labs&templateURL=:param{key="lhol_migration_dms_setup_yaml"})
+ 1. *Optionally, download [the YAML template](:param{key="lhol_migration_dms_setup_yaml"}) and launch it your own way*
9. Click Next
10. Confirm the Stack Name *dynamodbmigration* and keep the default parameters (modify if necessary)
- ![Final Deployment Architecture](/images/migration18.jpg)
+ ![Final Deployment Architecture](/static/images/migration18.jpg)
11. Click “Next” twice
12. Check “I acknowledge that AWS CloudFormation might create IAM resources with custom names.”
1. Click Submit. The CloudFormation template will take about 15 minutes to build a replication environment. You should continue the lab while the stack creates in the background.
- ![Final Deployment Architecture](/images/migration19.jpg)
+ ![Final Deployment Architecture](/static/images/migration19.jpg)
+
+::alert[_Do not wait for the stack to complete creation._ **Please continue the lab and allow it to create in the background.**]
+
-{{% notice note %}}
-_Do not wait for the stack to complete creation._ **Please continue the lab and allow it to create in the background.**
-{{% /notice %}}
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter02.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter02.en.md
index bad64191..29593dae 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter02.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter02.en.md
@@ -7,20 +7,19 @@ weight: 20
This chapter will create the source environment on AWS as discussed during the Exercise Overview.
The CloudFormation template used below will create Source VPC, EC2 hosting MySQL server, IMDb database and load IMDb public dataset into 6 tables.
-1. Launch the CloudFormation template in US East 1 to deploy the resources in your account:
-
- *Optionally, download [the YAML template]({{% siteparam "lhol_migration_setup_yaml" %}}) and launch it your own way in the [CloudFormation Console](https://console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/create/template)*
+1. Launch the CloudFormation template in US West 2 to deploy the resources in your account: [![CloudFormation](/static/images/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/new?stackName=amazon-dynamodb-labs&templateURL=:param{key="lhol_migration_setup_yaml"})
+ 1. *Optionally, download [the YAML template](:param{key="lhol_migration_setup_yaml"}) and launch it your own way*
4. Click Next
5. Confirm the Stack Name *rdbmsmigration* and update parameters if necessary (leave the default options if at all possible)
- ![Final Deployment Architecture](/images/migration6.jpg)
+ ![Final Deployment Architecture](/static/images/migration6.jpg)
6. Click “Next” twice then check “I acknowledge that AWS CloudFormation might create IAM resources with custom names.”
7. Click "Submit"
8. The CloudFormation stack will take about 5 minutes to build the environment
- ![Final Deployment Architecture](/images/migration7.jpg)
+ ![Final Deployment Architecture](/static/images/migration7.jpg)
9. Go to [EC2 Dashboard](https://console.aws.amazon.com/ec2/v2/home?region=us-west-2#Instances:) and ensure the Status check column is 2/2 checks passed before moving to the next step.
- ![Final Deployment Architecture](/images/migration8.jpg)
+ ![Final Deployment Architecture](/static/images/migration8.jpg)
+
+
+::alert[_Do not continue unless the MySQL instance is passing both health checks, 2/2._]
-{{% notice note %}}
-_Do not continue unless the MySQL instance is passing both health checks, 2/2._
-{{% /notice %}}
\ No newline at end of file
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter03.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter03.en.md
index 43c66d47..1a4f0901 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter03.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter03.en.md
@@ -14,39 +14,39 @@ It has also configured a remote MySQL user based on the CloudFormation input par
1. Go to [EC2 console](https://console.aws.amazon.com/ec2/v2/home?region=us-west-2#Instances:instanceState=running)
2. Select the MySQL-Instance and click Connect
- ![Final Deployment Architecture](/images/migration9.jpg)
+ ![Final Deployment Architecture](/static/images/migration9.jpg)
3. Make sure ec2-user is filled under the User name field. Click Connect
- ![Final Deployment Architecture](/images/migration10.jpg)
+ ![Final Deployment Architecture](/static/images/migration10.jpg)
4. Elevate your privilege using sudo command
```bash
sudo su
```
- ![Final Deployment Architecture](/images/migration11.jpg)
+ ![Final Deployment Architecture](/static/images/migration11.jpg)
5. Go to the file directory
```bash
cd /var/lib/mysql-files/
ls -lrt
```
6. You can see all 6 files copied from the IMDb dataset to the local EC2 directory
- ![Final Deployment Architecture](/images/migration12.jpg)
+ ![Final Deployment Architecture](/static/images/migration12.jpg)
7. Feel free to explore the files.
8. Go to AWS CloudFormation [Stacks](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks?filteringStatus=active&filteringText=&viewNested=true&hideStacks=false) and click on the stack you created earlier. Go to the Parameters tab and copy the user name and password mentioned next to DbMasterUsername & DbMasterPassword
- ![Final Deployment Architecture](/images/migration13.jpg)
+ ![Final Deployment Architecture](/static/images/migration13.jpg)
9. Go back to EC2 Instance console and login to mysql
```bash
mysql -u DbMasterUsername -pDbMasterPassword
```
- ![Final Deployment Architecture](/images/migration14.jpg)
+ ![Final Deployment Architecture](/static/images/migration14.jpg)
10. Congratulations! You are now connected to a self-managed MySQL source database on EC2. In the next steps, we will explore the database and tables hosting the IMDb datasets
```bash
use imdb;
```
- ![Final Deployment Architecture](/images/migration15.jpg)
+ ![Final Deployment Architecture](/static/images/migration15.jpg)
11. Show all the tables created;
```bash
show tables;
```
- ![Final Deployment Architecture](/images/migration16.jpg)
+ ![Final Deployment Architecture](/static/images/migration16.jpg)
For illustration purposes, below is a logical diagram representing the relationships between the various source tables hosting the IMDb dataset.
@@ -56,7 +56,7 @@ For illustration purpose, below is a logical diagram represents relationship bet
- title_principals has cast and crew information. It has a 1:many relationship with the title_basics table.
- title_crew has writer and director information. The table is 1:1 related with the title_basics table.
- name_basics has cast and crew details. Every member has a unique nconst value assigned.
- ![Final Deployment Architecture](/images/migration31.jpg)
+ ![Final Deployment Architecture](/static/images/migration31.jpg)
12. We will create a denormalized view with 1:1 static information and get it ready for migration to the Amazon DynamoDB table. For now, go ahead and copy the code below and paste it into the MySQL command line.
The details around the target data model will be discussed in the next chapter.
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter04.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter04.en.md
index c3f6a41e..9b0bccee 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter04.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter04.en.md
@@ -10,7 +10,7 @@ You can often query the data from multiple tables and assemble at the presentati
To support high-traffic queries with ultra-low latency, designing a schema to take advantage of a NoSQL system generally makes technical and economic sense.
To start designing a target data model in Amazon DynamoDB that will scale efficiently, you must identify the common access patterns. For the IMDb use case we have identified a set of access patterns as described below:
-![Final Deployment Architecture](/images/migration32.png)
+![Final Deployment Architecture](/static/images/migration32.png)
A common approach to DynamoDB schema design is to identify application layer entities and use denormalization and composite key aggregation to reduce query complexity.
In DynamoDB, this means using [composite sort keys](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-sort-keys.html), [overloaded global secondary indexes](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-gsi-overloading.html), and other design patterns.
@@ -39,11 +39,11 @@ During target modelling the data is migrated as is to the DynamoDB table.
- `RTNG`: Contains the IMDb rating and number of votes. These are considered dynamic, frequently changing records for a movie.
In order to reduce I/O during update scenarios, the record is not denormalized with other information in the DynamoDB table.
-![Final Deployment Architecture](/images/migration33.png)
+![Final Deployment Architecture](/static/images/migration33.png)
A new GSI is created on the movies table with a new partition key: `nconst` (unique per movie crew member with entity type `nm`) and sort key: `startYear`. This will help serve the access pattern of querying by crew member (#6 in the common access patterns table)
-![Final Deployment Architecture](/images/migration34.png)
+![Final Deployment Architecture](/static/images/migration34.png)
The short video below demonstrates how all of these access patterns are evaluated against the target DynamoDB model.
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter05.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter05.en.md
index 7b01e778..3203ee08 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter05.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter05.en.md
@@ -10,16 +10,14 @@ In this exercise, we will set up Database Migration Service (DMS) jobs to migrat
## Verify DMS creation
1. Go to [DMS Console](https://console.aws.amazon.com/dms/v2/home?region=us-west-2#dashboard) and click on Replication Instances. You should be able to see a replication instance of class dms.c5.2xlarge in Available status.
- ![Final Deployment Architecture](/images/migration20.jpg)
+ ![Final Deployment Architecture](/static/images/migration20.jpg)
-{{% notice note %}}
-_Make sure the DMS instance is Available before you continue. If it is not Available, return to the CloudFormation console to review and troubleshoot the CloudFormation stack._
-{{% /notice %}}
+::alert[_Make sure the DMS instance is Available before you continue. If it is not Available, return to the CloudFormation console to review and troubleshoot the CloudFormation stack._]
## Create source and target endpoints
1. Click on Endpoints and Create endpoint button
- ![Final Deployment Architecture](/images/migration21.jpg)
+ ![Final Deployment Architecture](/static/images/migration21.jpg)
2. Create the source endpoint. Use the following parameters to configure the endpoint:
| Parameter | Value |
@@ -34,9 +32,9 @@ _Make sure the DMS instance is Available before you continue. If it is not Avail
| User name | Value of DbMasterUsername added as parameter during Configure MySQL Environment |
| Password | Value of DbMasterPassword added as parameter during Configure MySQL Environment |
- ![Final Deployment Architecture](/images/migration22.jpg)
+ ![Final Deployment Architecture](/static/images/migration22.jpg)
Open Test endpoint connection (optional) section, then in the VPC drop-down select DMS-VPC and click the Run test button to verify that your endpoint configuration is valid. The test will run for a minute and you should see a successful message in the Status column. Click on the Create endpoint button to create the endpoint. If you see a connection error, re-type the username and password to ensure no mistakes were made. Further, ensure you provided the IPv4 DNS name ending in amazonaws.com in the field **Server name**.
- ![Final Deployment Architecture](/images/migration23.jpg)
+ ![Final Deployment Architecture](/static/images/migration23.jpg)
4. Create the target endpoint. Repeat all steps to create the target endpoint with the following parameter values:
@@ -47,7 +45,7 @@ _Make sure the DMS instance is Available before you continue. If it is not Avail
| Target engine | Amazon DynamoDB |
| Service access role ARN | CloudFormation template has created new role with full access to Amazon DynamoDB. Copy Role ARN from [dynamodb-access](https://us-east-1.console.aws.amazon.com/iamv2/home#/roles/details/dynamodb-access?section=permissions) role |
- ![Final Deployment Architecture](/images/migration24.jpg)
+ ![Final Deployment Architecture](/static/images/migration24.jpg)
Open Test endpoint connection (optional) section, then in the VPC drop-down select DMS-VPC and click the Run test button to verify that your endpoint configuration is valid. The test will run for a minute and you should see a successful message in the Status column. Click on the Create endpoint button to create the endpoint.
## Configure and Run a Replication Task
@@ -68,8 +66,8 @@ Still in the AWS DMS console, go to Database migration tasks and click the Creat
| Task settings: Turn on CloudWatch logs | Checked |
| Table mappings: Editing mode | Select JSON editor option and follow the instructions after below screenshots |
-![Final Deployment Architecture](/images/migration25.jpg)
-![Final Deployment Architecture](/images/migration26.jpg)
+![Final Deployment Architecture](/static/images/migration25.jpg)
+![Final Deployment Architecture](/static/images/migration26.jpg)
Start with the JSON editor section open in your browser. In this section we will create Table mappings JSON document to replace what you see in the JSON editor. This document includes source to target mapping including any transformation on the records that will be performed during migration.
To reduce the loading time during Immersion Day, we have narrowed down the migration list to selective movies. Below JSON document has list of 28 movies worked by Clint Eastwood.
@@ -239,7 +237,7 @@ Replace the string “REPLACE THIS STRING BY MOVIES LIST” with list of movies
}
```
-![Final Deployment Architecture](/images/migration36.png)
+![Final Deployment Architecture](/static/images/migration36.png)
Go to the bottom and click on Create task. At this point the task will be created and will automatically start loading selected movies from source to target DynamoDB table.
You can move forward and create two more tasks with similar steps (historical-migration02 and historical-migration03).
Use the same settings as above except the Table Mappings JSON document. For historical-migration02 and historical-migration03 tasks use the JSON document mentioned below.
@@ -362,12 +360,12 @@ Replace the string "REPLACE THIS STRING BY MOVIES LIST" with list of movies copi
}
```
#### Solutions
-{{%expand "If you are having trouble with making the JSON documents for the tasks, expand this section to get the solutions!" %}}
+::::expand{header="If you are having trouble with making the JSON documents for the tasks, expand this section to get the solutions!"}
-- [First Task - historical-migration01](/files/hands-on-labs/Task_1.json)
-- [Second Task - historical-migration02](/files/hands-on-labs/Task_2.json)
-- [Third Task - historical-migration03](/files/hands-on-labs/Task_3.json)
-{{% /expand%}}
+- [First Task - historical-migration01](:assetUrl{path="/files/hands-on-labs/Task_1.json"})
+- [Second Task - historical-migration02](:assetUrl{path="/files/hands-on-labs/Task_2.json"})
+- [Third Task - historical-migration03](:assetUrl{path="/files/hands-on-labs/Task_3.json"})
+::::
### Monitor and the restart/resume the tasks
The replication task for historical migration will start moving data from MySQL imdb.movies view, title_akas and title_ratings to DynamoDB table will start in a few minutes.
@@ -380,9 +378,7 @@ If you were to run this exercise again but do a full load, the load times would
You can track the status of data loading under the Table statistics of the migration task. Once loading is in progress, feel free to move to the next section of the exercise.
- ![Final Deployment Architecture](/images/migration27.jpg)
+ ![Final Deployment Architecture](/static/images/migration27.jpg)
-{{% notice warning %}}
-_Make sure all tasks are running or complete before you continue. If a task says **Ready**, check its box and choose "Restart/Resume" under the Actions button to start the task._
-{{% /notice %}}
+::alert[_Make sure all tasks are running or complete before you continue. If a task says **Ready**, check its box and choose "Restart/Resume" under the Actions button to start the task._]
diff --git a/content/hands-on-labs/rdbms-migration/migration-chapter06.en.md b/content/hands-on-labs/rdbms-migration/migration-chapter06.en.md
index 0432d78d..6a676aaa 100644
--- a/content/hands-on-labs/rdbms-migration/migration-chapter06.en.md
+++ b/content/hands-on-labs/rdbms-migration/migration-chapter06.en.md
@@ -9,11 +9,11 @@ Using PartiQL, you can easily interact with DynamoDB tables and run ad hoc queri
1. Login to [DynamoDB console](https://console.aws.amazon.com/dynamodbv2/home) and select PartiQL editor from left navigation.
2. Select movies table that was created and loaded by the Database Migration Service job. Select ellipsis next to the table name and click on the scan table.
- ![Final Deployment Architecture](/images/migration28.jpg)
+ ![Final Deployment Architecture](/static/images/migration28.jpg)
We will use PartiQL scripts to demonstrate all 6 access patterns discussed at previous chapter. For our example we will provide you the partition key values, but in real life you will need to make an index of keys perhaps using a GSI.
Get details by the movie: Each IMDB movie has a unique tconst. The denormalized table is created with each row representing a unique combination of movie and crew i.e. tconst and nconst.
Since tconst is part of the partition key for the base table, it can use under WHERE conditions to select the details. Copy below command to run inside PartiQL query editor.
- ![Final Deployment Architecture](/images/migration35.png)
+ ![Final Deployment Architecture](/static/images/migration35.png)
- Find all the cast and crew worked in a movie. Below query will include actor, actress, producer, cinematographer etc. worked in a given movie.
```bash
SELECT * FROM "movies"
@@ -64,8 +64,8 @@ This will allow querying on the new partition key for GSI vs scan on the base ta
| Data type | String|
| Attribute projections | All |
-![Final Deployment Architecture](/images/migration29.jpg)
-![Final Deployment Architecture](/images/migration30.jpg)
+![Final Deployment Architecture](/static/images/migration29.jpg)
+![Final Deployment Architecture](/static/images/migration30.jpg)
5. Finally, click on Create Index. This may take an hour depending on the number of records in the base table.
6. Once the GSI status columns change from Pending to Available, go back to the PartiQL editor to execute a query on GSI.
diff --git a/content/hands-on-labs/setup/aws-ws-event.en.md b/content/hands-on-labs/setup/aws-ws-event.en.md
index c0cf535c..72602ca1 100644
--- a/content/hands-on-labs/setup/aws-ws-event.en.md
+++ b/content/hands-on-labs/setup/aws-ws-event.en.md
@@ -1,9 +1,8 @@
---
-title: "At an AWS Hosted Event"
+title: "Start: At an AWS Hosted Event"
date: 2019-12-02T07:05:12-08:00
weight: 4
chapter: true
-pre: "Start: "
---
### Login to AWS Workshop Studio Portal
@@ -11,22 +10,22 @@ pre: "Start: "
1. If you are provided a one-click join link, use it and skip to step 3.
2. Visit [https://catalog.us-east-1.prod.workshops.aws](https://catalog.us-east-1.prod.workshops.aws). If you attended any other workshop earlier on this portal, please logout first. Click on **Get Started** on the right hand side of the window.
-![Workshop Studio Landing Page](/images/aws-ws-event1.png)
+![Workshop Studio Landing Page](/static/images/aws-ws-event1.png)
3. On the next, **Sign in** page, choose **Email One-Time Passcode (OTP)** to sign in to your workshop page.
-![Sign in page](/images/aws-ws-event2.png)
+![Sign in page](/static/images/aws-ws-event2.png)
4. Provide an email address to receive a one-time passcode.
-![Email address input](/images/aws-ws-event3.png)
+![Email address input](/static/images/aws-ws-event3.png)
5. Enter the passcode that you received in the provided email address, and click **Sign in**.
6. Next, in the textbox, enter the event access code (eg: abcd-012345-ef) that you received from the event facilitators. If you are provided a one-click join link, you will be redirected to the next step automatically.
-![Event access code](/images/aws-ws-event4.png)
+![Event access code](/static/images/aws-ws-event4.png)
7. Select on **I agree with the Terms and Conditions** on the bottom of the next page and click **Join event** to continue to the event dashboard.
8. On the event dashboard, click on **Open AWS console** to federate into AWS Management Console in a new tab. On the same page, click **Get started** to open the workshop instructions.
-![Event dashboard](/images/aws-ws-event5.png)
+![Event dashboard](/static/images/aws-ws-event5.png)
-9. Now that you are connected continue on to: [Step 1]({{< ref "design-patterns/setup/Step1" >}}).
+9. Now that you are connected continue on to: :link[Step 1]{href="/design-patterns/setup/Step1"}.
diff --git a/content/hands-on-labs/setup/cloud9.en.md b/content/hands-on-labs/setup/cloud9.en.md
index 891c4dd9..0ebc1ebe 100644
--- a/content/hands-on-labs/setup/cloud9.en.md
+++ b/content/hands-on-labs/setup/cloud9.en.md
@@ -8,4 +8,4 @@ Let's begin by navigating to [AWS cloud9 management Console](https://console.aws
Then run the command `aws sts get-caller-identity` just to verify that your AWS credentials have been properly configured.
-![Cloud9 Setup](/images/hands-on-labs/setup/cloud9_setup.png)
+![Cloud9 Setup](/static/images/hands-on-labs/setup/cloud9_setup.png)
diff --git a/content/hands-on-labs/setup/_index.en.md b/content/hands-on-labs/setup/index.en.md
similarity index 72%
rename from content/hands-on-labs/setup/_index.en.md
rename to content/hands-on-labs/setup/index.en.md
index eed3db74..42524e3f 100644
--- a/content/hands-on-labs/setup/_index.en.md
+++ b/content/hands-on-labs/setup/index.en.md
@@ -1,10 +1,8 @@
---
-title: "Getting Started"
-menuTitle: "Getting Started"
+title: "1. Getting Started"
date: 2021-04-21T07:33:04-05:00
weight: 10
chapter: true
-pre: "1. "
---
@@ -12,4 +10,4 @@ In this chapter, we'll cover the prerequisites needed to get started with [Amazo
The deployment architecture that you will be building in this lab will look like the below.
-![Final Deployment Architecture](/images/hands-on-labs/setup/dynamodb_lab_architecture.png)
+![Final Deployment Architecture](/static/images/hands-on-labs/setup/dynamodb_lab_architecture.png)
diff --git a/content/hands-on-labs/setup/load-sample-data.en.md b/content/hands-on-labs/setup/load-sample-data.en.md
index a4b5608d..1c80fb46 100644
--- a/content/hands-on-labs/setup/load-sample-data.en.md
+++ b/content/hands-on-labs/setup/load-sample-data.en.md
@@ -33,4 +33,4 @@ After each data load you should get this message saying that there were no Unpro
```
#### Sample output
-![Cloud9 Setup](/images/hands-on-labs/setup/load_data.png)
+![Cloud9 Setup](/static/images/hands-on-labs/setup/load_data.png)
diff --git a/content/hands-on-labs/setup/setup.en.md b/content/hands-on-labs/setup/setup.en.md
index 249b29b9..eaa01aed 100644
--- a/content/hands-on-labs/setup/setup.en.md
+++ b/content/hands-on-labs/setup/setup.en.md
@@ -1,38 +1,28 @@
---
-title: "Environment Set up"
-menuTitle: "On your own"
-date: 2021-04-21T07:39:31-05:00
+title: "Start: On your own"
+date: 2019-12-02T07:05:12-08:00
weight: 5
-pre: "Start: "
+chapter: true
---
-{{% notice warning %}}
-These setup instructions are identitical for LADV, LHOL, and LGME - all of which use the same Cloud9 template.
-Only complete this section once, and only if you're running it on your own account.
-{{% /notice %}}
+::alert[These setup instructions are identical for LADV, LHOL, and LGME - all of which use the same Cloud9 template. Only complete this section once, and only if you're running it on your own account.]{type="warning"}
-{{% notice info %}}
-Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re:Invent, Immersion Day, etc), go to [At an AWS hosted Event]({{< ref "/hands-on-labs/setup/aws-ws-event">}})
-{{% /notice %}}
+::alert[Only complete this section if you are running the workshop on your own. If you are at an AWS hosted event (such as re\:Invent, Immersion Day, etc), go to :link[At an AWS hosted Event]{href="/hands-on-labs/setup/aws-ws-event"}]
## Launch the CloudFormation stack
-{{% notice warning %}}
-During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.
-{{% /notice %}}
+::alert[During the course of the lab, you will make DynamoDB tables that will incur a cost that could approach tens or hundreds of dollars per day. Ensure you delete the DynamoDB tables using the DynamoDB console, and make sure you delete the CloudFormation stack as soon as the lab is complete.]
-1. Launch the CloudFormation template in US East 1 to deploy the resources in your account:
-
- *Optionally, download [the YAML template]({{% siteparam "design_patterns_s3_lab_yaml" %}}) and launch it your own way*
+1. Launch the CloudFormation template in US West 2 to deploy the resources in your account: [![CloudFormation](/static/images/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/new?stackName=amazon-dynamodb-labs&templateURL=:param{key="design_patterns_s3_lab_yaml"})
+ 1. *Optionally, download [the YAML template](:param{key="design_patterns_s3_lab_yaml"}) and launch it your own way*
1. Click *Next* on the first dialog.
1. In the Parameters section, note the *Timeout* is set to zero. This means the Cloud9 instance will not sleep; you may want to change this manually to a value such as 60 to protect against unexpected charges if you forget to delete the stack at the end.
Leave the *WorkshopZIP* parameter unchanged and click *Next*
-![CloudFormation parameters](/images/awsconsole1.png)
+![CloudFormation parameters](/static/images/awsconsole1.png)
1. Scroll to the bottom and click *Next*, and then review the *Template* and *Parameters*. When you are ready to create the stack, scroll to the bottom, check the box acknowledging the creation of IAM resources, and click *Create stack*.
-![CloudFormation parameters](/images/awsconsole2.png)
+![CloudFormation parameters](/static/images/awsconsole2.png)
The stack will create a Cloud9 lab instance, a role for the instance, and a role for the AWS Lambda function used later on in the lab. It will use Systems Manager to configure the Cloud9 instance.
-
-1. After the CloudFormation stack is `CREATE_COMPLETE`, [continue onto Prerequisites]({{< ref "hands-on-labs/setup/prerequisites" >}}).
+1. After the CloudFormation stack is `CREATE_COMPLETE`, :link[continue onto Prerequisites]{href="/hands-on-labs/setup/prerequisites"}.
diff --git a/content/_index.en.md b/content/index.en.md
similarity index 93%
rename from content/_index.en.md
rename to content/index.en.md
index c56503f0..dd876f85 100644
--- a/content/_index.en.md
+++ b/content/index.en.md
@@ -6,7 +6,7 @@ chapter: true
weight: 1
---
-![Open the DynamoDB Logo](/images/Amazon-DynamoDB.png)
+![Open the DynamoDB Logo](/static/images/Amazon-DynamoDB.png)
Welcome to the AWS Workshop and Lab Content Portal for [Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html), a key-value and document database that delivers single-digit millisecond performance at any scale. Here you will find a collection of workshops and hands-on content aimed at helping you gain an understanding of DynamoDB features and NoSQL data modeling best practices.
@@ -16,4 +16,4 @@ Prior expertise with AWS and NoSQL databases is beneficial but not required to c
If you're brand new to DynamoDB with no experience, you may want to begin with *Hands-on Labs for Amazon DynamoDB*. If you want to learn the design patterns for DynamoDB, check out *Advanced Design Patterns for DynamoDB* and the *Design Challenges* scenarios.
Dive into the content:
-{{% children depth="1" description="true" %}}
+::children{depth=1}
diff --git a/content/scenarios/Bank Payments/_index.en.md b/content/scenarios/Bank Payments/index.en.md
similarity index 100%
rename from content/scenarios/Bank Payments/_index.en.md
rename to content/scenarios/Bank Payments/index.en.md
diff --git a/content/scenarios/Retail Cart/_index.en.md b/content/scenarios/Retail Cart/index.en.md
similarity index 100%
rename from content/scenarios/Retail Cart/_index.en.md
rename to content/scenarios/Retail Cart/index.en.md
diff --git a/content/scenarios/_index.en.md b/content/scenarios/index.en.md
similarity index 84%
rename from content/scenarios/_index.en.md
rename to content/scenarios/index.en.md
index 629fb043..c577bab3 100644
--- a/content/scenarios/_index.en.md
+++ b/content/scenarios/index.en.md
@@ -1,12 +1,11 @@
---
-title: "Design Challenges"
+title: "LDC: Design Challenges"
chapter: true
-weight: 2
+weight: 20
description: "400 level: A collection of data model design challenge labs to help you understand the decisions required when building efficient data models."
-pre: "LDC: "
---
## Design Challenges
-![Open the NoSQL Workbench Logo](/images/nosql_wb.png)
+![Open the NoSQL Workbench Logo](/static/images/nosql_wb.png)
This is a collection of data model design challenge scenarios to help you understand the decisions you make when building efficient data models. While not required for this section, the **[NoSQL Workbench for Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/workbench.html)**, is an excellent tool to help build, visualize, and manipulate data models for DynamoDB.
diff --git a/content/reference-materials/_index.en.md b/content/scenarios/reference-materials/index.en.md
similarity index 96%
rename from content/reference-materials/_index.en.md
rename to content/scenarios/reference-materials/index.en.md
index 41828c40..5ef11f3b 100644
--- a/content/reference-materials/_index.en.md
+++ b/content/scenarios/reference-materials/index.en.md
@@ -1,8 +1,7 @@
---
-title: "NoSQL Design: Reference Materials"
+title: "Links: NoSQL Design: Reference Materials"
chapter: true
description: "A set of links and reference materials describing how DynamoDB works and best practices for building data models."
-pre: "Links: "
weight: 3
---
diff --git a/contentspec.yaml b/contentspec.yaml
new file mode 100644
index 00000000..a91eda99
--- /dev/null
+++ b/contentspec.yaml
@@ -0,0 +1,14 @@
+# This is a stub to allow preview_build to run. Changes to this file do not make it to Workshop Studio
+version: 2.0
+
+defaultLocaleCode: en-US
+localeCodes:
+ - en-US
+params:
+ latest_rh_design_pattern_yt: "https://www.youtube.com/watch?v=xfxBhvGpoa0"
+ design_patterns_s3_lab_yaml: "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/C9.yaml"
+ lhol_migration_setup_yaml: "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/migration-env-setup.yaml"
+ lhol_migration_dms_setup_yaml: "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/migration-dms-setup.yaml"
+ event_driven_architecture_lab_yaml: "https://s3.amazonaws.com/amazon-dynamodb-labs.com/assets/event-driven-cfn.yaml"
+ github_contributing_guide : "https://github.com/aws-samples/amazon-dynamodb-labs/blob/master/CONTRIBUTING.md"
+ github_issues_link : "https://github.com/aws-samples/amazon-dynamodb-labs/issues"