diff --git a/amazon-kinesis/2.1/antora.yml b/amazon-kinesis/2.1/antora.yml
new file mode 100644
index 0000000000..ba2b2f90cf
--- /dev/null
+++ b/amazon-kinesis/2.1/antora.yml
@@ -0,0 +1,17 @@
+name: amazon-kinesis-connector
+version: '2.1'
+display_version: 2.1 (Mule 4)
+title: Amazon Kinesis Connector
+nav:
+- modules/ROOT/nav.adoc
+asciidoc:
+ attributes:
+ page-component-desc: Provides access to standard Amazon Kinesis Data Stream operations using Anypoint Platform.
+ page-connector-type: Connector
+ page-connector-level: Select
+ page-exchange-group-id: com.mulesoft.connectors
+ page-exchange-asset-id: amazon-kinesis-data-streams-connector
+ page-runtime-version: 4.3.0
+ page-release-notes-page: release-notes::connector/amazon-kinesis-connector-release-notes-mule-4.adoc
+ page-vendor-name: amazon
+ page-vendor-title: Amazon
diff --git a/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-access-advance-configuration.png b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-access-advance-configuration.png
new file mode 100644
index 0000000000..13769c747b
Binary files /dev/null and b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-access-advance-configuration.png differ
diff --git a/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-access-configuration.png b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-access-configuration.png
new file mode 100644
index 0000000000..2a9201e5ac
Binary files /dev/null and b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-access-configuration.png differ
diff --git a/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-checkpoint.png b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-checkpoint.png
new file mode 100644
index 0000000000..ff4d57889f
Binary files /dev/null and b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-checkpoint.png differ
diff --git a/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-listener.png b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-listener.png
new file mode 100644
index 0000000000..2c3e4b1a47
Binary files /dev/null and b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-listener.png differ
diff --git a/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-put-record.png b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-put-record.png
new file mode 100644
index 0000000000..e42c067083
Binary files /dev/null and b/amazon-kinesis/2.1/modules/ROOT/images/amazon-kinesis-example-put-record.png differ
diff --git a/amazon-kinesis/2.1/modules/ROOT/nav.adoc b/amazon-kinesis/2.1/modules/ROOT/nav.adoc
new file mode 100644
index 0000000000..bcf78dd283
--- /dev/null
+++ b/amazon-kinesis/2.1/modules/ROOT/nav.adoc
@@ -0,0 +1,6 @@
+.xref:index.adoc[Amazon Kinesis Connector]
+* xref:index.adoc[Amazon Kinesis Connector Overview]
+* xref:amazon-kinesis-connector-studio.adoc[Using Anypoint Studio to Configure Amazon Kinesis Connector]
+* xref:amazon-kinesis-connector-xml-maven.adoc[Amazon Kinesis Connector XML and Maven Support]
+* xref:amazon-kinesis-connector-examples.adoc[Amazon Kinesis Connector Examples]
+* xref:amazon-kinesis-connector-reference.adoc[Amazon Kinesis Connector Reference]
diff --git a/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-examples.adoc b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-examples.adoc
new file mode 100644
index 0000000000..b58f1fda63
--- /dev/null
+++ b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-examples.adoc
@@ -0,0 +1,129 @@
+= Amazon Kinesis Data Streams Connector 2.1 Examples - Mule 4
+
+The following example shows how to use Anypoint Connector for Amazon Kinesis Data Streams (Amazon Kinesis Data Streams Connector) to put data records and listen from an Amazon Kinesis data stream.
+
+Before you try the example, access Anypoint Studio (Studio) and verify that the *Mule Palette* view displays entries for Kinesis. If not, follow the instructions in xref:amazon-kinesis-connector-studio.adoc#add-connector-to-project[Add the Connector to Your Project].
+
+== Flows for This Example
+
+The following screenshots show the Anypoint Studio app flows for this example:
+
+* This flow uses the *Put Records* operation to put data into the Amazon Kinesis data stream. In this flow, *HTTP > Listener* receives a query parameter named *data* that sets the payload for the *Put Record* operation. It uses a query parameter named *partitionKey* to set the partition key value for the *Put Record* operation.
++
+image::amazon-kinesis-example-put-record.png[Put Record operation flow]
++
+* This flow uses the *Listener* source to listen for new data records:
++
+image::amazon-kinesis-example-listener.png[Listener source flow]
++
+* This flow uses the *Listener* source to listen for new data records. After receiving the new records, it calls the *Checkpoint* operation to set a manual checkpoint:
++
+image::amazon-kinesis-example-checkpoint.png[Checkpoint operation flow]
++
+* Amazon Configuration
++
+image::amazon-kinesis-access-configuration.png[]
++
+* Amazon Advanced Configuration
++
+image::amazon-kinesis-access-advance-configuration.png[]
+
+== XML Code for This Example
+
+[source,xml,linenums]
+----
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+----
+
+=== See Also
+
+* xref:connectors::introduction/introduction-to-anypoint-connectors.adoc[Introduction to Anypoint Connectors]
+* https://help.mulesoft.com[MuleSoft Help Center]
diff --git a/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-reference.adoc b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-reference.adoc
new file mode 100644
index 0000000000..d9b737f1e5
--- /dev/null
+++ b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-reference.adoc
@@ -0,0 +1,533 @@
+= Amazon Kinesis Data Streams Connector 2.1 Reference - Mule 4
+
+
+
+NOTE: Amazon Kinesis Data Streams Connector doesn’t support Windows because it uses the Kinesis Producer Library (KPL), which deprecated Windows support starting in version 0.14.0.
+
+== Configurations
+---
+[[Config]]
+=== Config
+
+The configuration for the connector. You can use this configuration for both the producer and consumer.
+
+==== Parameters
+
+[%header%autowidth.spread]
+|===
+| Name | Type | Description | Default Value | Required
+|Name | String | The name for this configuration. Connectors reference the configuration with this name. | | x
+| Connection a| <<Config_Connection, Kinesis Connection>> | The connection types to provide to this configuration. | | x
+| Aggregation Enabled a| Boolean | When set to `true`, this option packs multiple user data records into a single Amazon Kinesis data record. | true |
+| Response Timeout a| Number | The maximum total time that can elapse between the start of an HTTP request and when the responses are received. If this time is exceeded, the request times out.| 30 |
+| Request Timeout Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | A time unit that qualifies the *Response Timeout* value | SECONDS |
+| Record Max Buffered Time a| Number | Maximum amount of time that an Amazon Kinesis data record spends buffering before it is sent. Records can be sent earlier than this value, depending on other buffering limits. | 3 |
+| Record Max Buffered Time Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | A time unit that qualifies the *Record Max Buffered Time* value| SECONDS |
+| Aggregation Max Count a| Number | Maximum number of items to pack into an aggregated Amazon Kinesis data record. | 4294967295 |
+| Aggregation Max Size a| Number | Maximum data size to pack into an aggregated Amazon Kinesis data record. | 51200 |
+| Aggregation Max Size Unit a| Enumeration, one of:
+
+** BYTE
+** KB
+** MB
+** GB | A data unit that qualifies the *Aggregation Max Size* value | BYTE |
+| Collection Max Count a| Number | Maximum number of items to pack into a batch request. | 500 |
+| Collection Max Size a| Number | Maximum amount of data to send with a *Put Record* operation. | 5242880 |
+| Collection Max Size Unit a| Enumeration, one of:
+
+** BYTE
+** KB
+** MB
+** GB | A data unit that qualifies the *Collection Max Size* value | BYTE |
+| Connection Timeout a| Number | The amount of time to wait when initially establishing the TCP connection between the connector and Amazon Kinesis server before throwing an exception if the connection fails | 30 |
+| Connection Timeout Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | The time unit for the *Connection Timeout* value | SECONDS |
+| Max Connections a| Number | Maximum number of connections to open to the backend. HTTP requests are sent in parallel over multiple connections. Setting this value too high can impact latency and consume additional resources without increasing throughput. | 24 |
+| Min Connections a| Number | Minimum number of connections to keep open to the backend. In most cases, there is no need to increase this value. | 1 |
+| Rate Limit a| Number | Limits the number of data records per second and number of bytes per second sent to a shard. This limit can reduce bandwidth and CPU cycle wastage from sending requests that will fail from throttling. The default value, 150%, enables a single producer instance to completely saturate the allowance for a shard. To reduce throttling errors rather than completely saturate the shard, consider reducing this setting. | 150% |
+| Record TTL a| Number | Time limit for data records to be put. Records that are not successfully put within this limit fail. When setting this value, take into consideration the fact that data records still incur network latency after they leave the Amazon Kinesis Producer Library (KPL). | 30 |
+| Record TTL Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | The time unit for the *Record TTL* value. | SECONDS |
+| Thread Pool Size a| Number | Maximum number of threads with which to configure the native process thread pool. The default value, 0, enables the KPL process to choose the size of the thread pool. | 0 |
+|===
+
+==== Connection Types
+[[Config_Connection]]
+===== Kinesis Connection
+
+Connects to AWS Kinesis through the use of a user-specified access and secret key and, optionally, a role. These credentials securely sign requests sent to AWS services.
+
+====== Parameters
+
+[%header%autowidth.spread]
+|===
+| Name | Type | Description | Default Value | Required
+| Proxy Configuration a| <<ProxyConfiguration, Proxy Configuration>> | Proxy connection settings for outbound connections. This setting applies to the *Listener* source and *Checkpoint* operation. It does not affect the *Put Record* operation. | |
+| TLS Configuration a| <<Tls, TLS>> | Protocol to use for communication. When using the HTTPS protocol, the HTTP communication is secured using TLS or SSL. If HTTPS is configured as the protocol, then, at a minimum, you must configure the keystore in the `tls:context` child element of the `listener-config`.
+{sp} +
+{sp} +
+This setting applies to the *Listener* source and *Checkpoint* operation. It does not affect the *Put Record* operation.
+| HTTP|
+| Access Key a| String | Access key provided by Amazon. | | x
+| Secret Key a| String | Secret key provided by Amazon. | | x
+| Region Endpoint a| String | Topic region endpoint | us-east-1 |
+| Role a| <<Role, Role>> | Role configuration | | {nbsp}
+| Dynamo DB Endpoint a| String | Sets a Dynamo DB Endpoint to track and maintain state information. | | {nbsp}
+| Cloud Watch Endpoint a| String | Sets a Cloud Watch Endpoint for monitoring and observability. | | {nbsp}
+| Connection Timeout a| Number | The amount of time to wait when initially establishing the TCP connection between the connector and Amazon Kinesis server before throwing an exception if the connection fails | 30 |
+| Connection Timeout Time Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | The time unit for the *Connection Timeout* field. | SECONDS |
+| Response Timeout a| Number | The maximum time to wait between the time an HTTP request is sent and when a response is received | 30 |
+| Response Timeout Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | Time unit for the *Response Timeout* value. | SECONDS |
+| Reconnection a| <<Reconnection, Reconnection>> | When the application is deployed, a connectivity test is performed on all connectors. If set to `true`, deployment fails if the test doesn't pass after exhausting the associated reconnection strategy. | |
+|===
+
+== Supported Operations
+* <<Checkpoint, Checkpoint>>
+* <<PutRecord, Put Record>>
+
+== Associated Sources
+* <<Listener, Listener>>
+
+== Operations
+
+[[Checkpoint]]
+== Checkpoint
+``
+
+This operation checkpoints the position of a listener into the Amazon DynamoDB table. Upon failover, the connector starts fetching records that are located after the checkpointed position. The *Checkpoint* operation does not modify the current position of the listener.
+
+=== Parameters
+
+[%header%autowidth.spread]
+|===
+| Name | Type | Description | Default Value | Required
+| Configuration | String | The name of the configuration to use. | | x
+| Stream Name a| String | The stream name. | | x
+| Application Name a| String | Name of the DynamoDB table that holds data about the current stream positions. | | x
+| Shard ID a| String | The shard identifier. | |
+| Sequence Number a| String | A sequence number at which to checkpoint in the shard. | |
+| Subsequence Number a| Number | A subsequence number at which to checkpoint within the shard. | |
+| Config Ref a| ConfigurationProvider | The name of the configuration to use to execute this component | | x
+|===
+
+
+=== For Configurations
+
+* <<Config, Config>>
+
+=== Throws
+
+* KINESIS:ILLEGAL_STATE
+* KINESIS:INVALID_ARGUMENT
+* KINESIS:PROVISIONED_THROUGHPUT_EXCEEDED
+* KINESIS:TIMEOUT
+* KINESIS:UNAUTHORIZED
+
+[[PutRecord]]
+== Put Record
+``
+
+Puts a data record into an Amazon Kinesis data stream.
+
+=== Parameters
+
+[%header%autowidth.spread]
+|===
+| Name | Type | Description | Default Value | Required
+| Configuration | String | The name of the configuration to use. | | x
+| Stream Name a| String | The stream name | | x
+| Partition Key a| String | The partition key. If not provided, the connector generates a UUID for this value. | |
+| Explicit Hash Key a| String | Overrides the Amazon Kinesis Data Streams explicitHashKey value, which is normally computed by applying an MD5 hash function to the data record's partition key. | |
+| Data a| Any | The content to put in the Amazon Kinesis data stream. This content can be up to 1 MB in size. | #[payload] |
+| Config Ref a| ConfigurationProvider | The name of the configuration to use to execute this component | | x
+| Streaming Strategy a| * <<RepeatableInMemoryStream, Repeatable In Memory Stream>>
+* <<RepeatableFileStoreStream, Repeatable File Store Stream>>
+* non-repeatable-stream | Disables the repeatable stream functionality and uses non-repeatable streams to have lower performance overhead, memory use, and costs | |
+| Target Variable a| String | Name of the variable in which to store the operation's output | |
+| Target Value a| String | Expression that evaluates the operation's output. The expression outcome is stored in the target variable. | #[payload] |
+| Reconnection Strategy a| * <<Reconnect, Reconnect>>
+* <<ReconnectForever, Reconnect Forever>> | A retry strategy in case of connectivity errors | |
+|===
+
+=== Output
+
+[%autowidth.spread]
+|===
+|Type |Any
+|===
+
+=== For Configurations
+
+* <<Config, Config>>
+
+=== Throws
+
+* KINESIS:CONNECTIVITY
+* KINESIS:INVALID_ARGUMENT
+* KINESIS:PROCESSING
+* KINESIS:RETRY_EXHAUSTED
+* KINESIS:TIMEOUT
+* KINESIS:UNAUTHORIZED
+
+== Sources
+
+[[Listener]]
+== Listener
+``
+
+Uses the Amazon Kinesis Client Library (KCL) to listen from a specified point in a stream.
+
+=== Parameters
+
+[%header%autowidth.spread]
+|===
+| Name | Type | Description | Default Value | Required
+| Configuration | String | The name of the configuration to use. | | x
+| Output Mime Type a| String | The MIME type of the payload that this operation outputs. | |
+| Output Encoding a| String | The encoding of the payload that this operation outputs. | |
+| Config Ref a| ConfigurationProvider | The name of the configuration to use to execute this component | | x
+| Primary Node Only a| Boolean | Whether this listener executes on the primary node only when running in a cluster | |
+| On Capacity Overload a| Enumeration, one of:
+
+** WAIT
+** DROP | Strategy that Mule applies when the flow receives more messages than it can handle | WAIT |
+| Redelivery Policy a| <<RedeliveryPolicy, Redelivery Policy>> | Policy for processing the redelivery of a message | |
+| Stream Name a| String | The stream name. | | x
+| Application Name a| String | Name of the DynamoDB table that holds data about current stream positions. If the table doesn't exist, then the connector creates a new one at the initial position. | | x
+| Shard Configs a| Array of <<ShardConfig, Shard Config>> | The shard configuration. If specified, the listener listens only from selected shards. | |
+| Max Batch Size a| Number | The maximum number of data records that a batch can carry. | 10000 |
+| Max Leases For Worker a| Number | Number of shards to which the listener can subscribe in parallel. | 2147483647 |
+| Shard Prioritization a| Enumeration, one of:
+
+** PARENTS_FIRST_SHARD_PRIORITIZATION
+** NO_OP_SHARD_PRIORITIZATION | Logic used to prioritize or filter the shards before their execution.
+{sp} +
+{sp} +
+PARENTS_FIRST_SHARD_PRIORITIZATION
+Prioritizes parent shards first and limits the number of shards that are available for initialization based on their depth (*Max Depth* field).
+
+NO_OP_SHARD_PRIORITIZATION
+Returns the original list of shards without any modifications.
+| NO_OP_SHARD_PRIORITIZATION |
+| Max Depth a| Number | Used by the PARENTS_FIRST_SHARD_PRIORITIZATION shard prioritization option. Any shard that is deeper than this value is excluded from processing. | |
+| Read Capacity Units a| Number | The maximum number of strongly consistent reads consumed per second before Amazon DynamoDB returns a ThrottlingException. | 10 |
+| Write Capacity Units a| Number | The maximum number of writes consumed per second before Amazon DynamoDB returns a ThrottlingException. | 10 |
+| Failover Time a| Number | Duration of a lease for a worker. This parameter also determines the frequency with which a worker looks for new leases to work on. The frequency is roughly twice the specified value. | 10 |
+| Failover Time Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | Time unit for the *Failover Time* field. | SECONDS |
+| Checkpoint On Complete a| Boolean | If `true`, the listener checkpoints the current reading position to Amazon DynamoDB after data records are processed by the owning flow, without taking into account whether the processing finished successfully. | true |
+| Absolute Position a| Enumeration, one of:
+
+** LATEST
+** FIRST | Whether the listener listens from the latest data record or the oldest data record | |
+| Timestamp a| DateTime | Date and time from which the listener listens | |
+| Reconnection Strategy a| * <<Reconnect, Reconnect>>
+* <<ReconnectForever, Reconnect Forever>> | A retry strategy in case of connectivity errors | |
+|===
+
+=== Output
+
+[%autowidth.spread]
+|===
+|Type |Array of binary messages with <<RecordAttributes, Record Attributes>>
+|===
+
+=== For Configurations
+
+* <<Config, Config>>
+
+== Types
+[[ProxyConfiguration]]
+=== Proxy Configuration
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Host a| String | Host where the proxy requests are sent. | | x
+| Port a| Number | Port where the proxy requests are sent. | | x
+| Username a| String | The username to authenticate against the proxy. | |
+| Password a| String | The password to authenticate against the proxy. | |
+| Non Proxy Hosts a| Array of String | A list of hosts against which the proxy should not be used. | |
+| Ntlm Domain a| String | The domain to authenticate against the proxy. | |
+|===
+
+[[Tls]]
+=== TLS
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Enabled Protocols a| String | A comma-separated list of protocols enabled for this context. | |
+| Enabled Cipher Suites a| String | A comma-separated list of cipher suites enabled for this context. | |
+| Trust Store a| <<TrustStore, Trust Store>> | | |
+| Key Store a| <<KeyStore, Key Store>> | | |
+| Revocation Check a| * <<StandardRevocationCheck, Standard Revocation Check>>
+* <<CustomOcspResponder, Custom Ocsp Responder>>
+* <<CrlFile, Crl File>> | | |
+|===
+
+[[TrustStore]]
+=== Trust Store
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Path a| String | The location (which is resolved relative to the current classpath and file system, if possible) of the truststore. | |
+| Password a| String | The password used to protect the truststore. | |
+| Type a| String | The type of store used. | |
+| Algorithm a| String | The algorithm used by the truststore. | |
+| Insecure a| Boolean | If set to `true`, no certificate validations are performed, which makes connections vulnerable to attacks. Use at your own risk. | |
+|===
+
+[[KeyStore]]
+=== Key Store
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Path a| String | The location (which is resolved relative to the current classpath and file system, if possible) of the keystore. | |
+| Type a| String | The type of store used. | |
+| Alias a| String | When the keystore contains many private keys, this attribute indicates the alias of the key that should be used. If not defined, the first key in the file is used by default. | |
+| Key Password a| String | The password used to protect the private key. | |
+| Password a| String | The password used to protect the keystore. | |
+| Algorithm a| String | The algorithm used by the keystore. | |
+|===
+
+[[StandardRevocationCheck]]
+=== Standard Revocation Check
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Only End Entities a| Boolean | Verifies the last element of the certificate chain only. | |
+| Prefer Crls a| Boolean | Tries CRL instead of OCSP first. | |
+| No Fallback a| Boolean | Does not use the secondary checking method. | |
+| Soft Fail a| Boolean | Avoids a certification failure when the revocation server cannot be reached or is busy. | |
+|===
+
+[[CustomOcspResponder]]
+=== Custom Ocsp Responder
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Url a| String | The URL of the OCSP responder. | |
+| Cert Alias a| String | Alias of the signing certificate for the OCSP response (must be in the truststore), if present. | |
+|===
+
+[[CrlFile]]
+=== Crl File
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Path a| String | The path to the CRL file. | |
+|===
+
+[[Role]]
+=== Role
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Arn a| String | The Amazon Resource Name (ARN) of the role to assume. | | x
+| External Id a| String | A unique identifier that might be required when you assume a role in another account. If the administrator of the
+account to which the role belongs provides an external ID, then provide that value in this field. | |
+| Duration a| Number | The duration of the role session. | 3600 |
+| Duration Time Unit a| Enumeration, one of:
+
+** NANOSECONDS
+** MICROSECONDS
+** MILLISECONDS
+** SECONDS
+** MINUTES
+** HOURS
+** DAYS | Time unit for the *Duration* field | SECONDS |
+| Referred Policy Arns a| Array of String | The Amazon Resource Names (ARNs) of the IAM-managed policies to use as managed session policies.
+The policies must exist in the same account as the role. | |
+| Tags a| Object | A list of session tags that you want to pass. Each session tag consists of a key name and an associated value. | |
+|===
+
+[[Reconnection]]
+=== Reconnection
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Fails Deployment a| Boolean | When the application is deployed, a connectivity test is performed on all connectors. If set to `true`, deployment fails if the test doesn't pass after exhausting the associated reconnection strategy. | |
+| Reconnection Strategy a| * <<Reconnect, Reconnect>>
+* <<ReconnectForever, Reconnect Forever>> | The reconnection strategy to use. | |
+|===
+
+[[Reconnect]]
+=== Reconnect
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Frequency a| Number | How often in milliseconds to reconnect | |
+| Count a| Number | How many reconnection attempts to make. | |
+|===
+
+[[ReconnectForever]]
+=== Reconnect Forever
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Frequency a| Number | How often in milliseconds to reconnect | |
+|===
+
+[[RecordAttributes]]
+=== Record Attributes
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Aggregated a| Boolean | | |
+| Approximate Arrival Timestamp a| String | | |
+| Encryption Type a| String | | |
+| Partition Key a| String | | |
+| Partition Key a| String | | |
+| Sequence Number a| String | | |
+| Shard Id a| String | | |
+| Sub Sequence Number a| Number | | |
+|===
+
+[[RedeliveryPolicy]]
+=== Redelivery Policy
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Max Redelivery Count a| Number | The maximum number of times a message can be redelivered and processed unsuccessfully before triggering a process-failed message | |
+| Message Digest Algorithm a| String | The secure hashing algorithm to use. | SHA-256|
+| Message Identifier a| <<RedeliveryPolicyMessageIdentifier, Redelivery Policy Message Identifier>> | The strategy used to identify the messages. | |
+| Object Store a| ObjectStore | The object store that stores the redelivery counter for each message. | |
+|===
+
+[[RedeliveryPolicyMessageIdentifier]]
+=== Redelivery Policy Message Identifier
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Use Secure Hash a| Boolean | Whether to use a secure hash algorithm to identify a redelivered message. | |
+| Id Expression a| String | One or more expressions to use to determine when a message was redelivered. This property can be set only if the value of the *Use Secure Hash* field is `false`. | |
+|===
+
+[[ShardConfig]]
+=== Shard Config
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Shard Id a| String | The shard identifier. | | x
+| Shard Initial Position a| <<ShardInitialPosition, Shard Initial Position>> | The shard initial position. | | x
+|===
+
+[[ShardInitialPosition]]
+=== Shard Initial Position
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| At Sequence Number a| String | The listener starts listening at this position in the data stream. | |
+| After Sequence Number a| String | The listener starts listening after this position in the data stream. | |
+| Absolute Position a| Enumeration, one of:
+
+** LATEST
+** FIRST | The listener starts listening from the latest or oldest record, depending on the value. | |
+| Timestamp a| DateTime | | |
+|===
+
+[[RepeatableInMemoryStream]]
+=== Repeatable In Memory Stream
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| Initial Buffer Size a| Number | The amount of memory to allocate to consume the stream and provide random access to it. If the stream contains more data than can fit into this buffer, then the buffer expands according to the *Buffer Size Increment* value, with an upper limit of the *Max In Memory Size* value. | |
+| Buffer Size Increment a| Number | This is by how much the buffer size expands if it exceeds its initial size. Setting a value of zero or lower means that the buffer should not expand, meaning that a `STREAM_MAXIMUM_SIZE_EXCEEDED` error is raised when the buffer is full. | |
+| Max Buffer Size a| Number | The maximum amount of memory to use. If more memory is used, then a `STREAM_MAXIMUM_SIZE_EXCEEDED` error is raised. A value less than or equal to zero means no limit. | |
+| Buffer Unit a| Enumeration, one of:
+
+** BYTE
+** KB
+** MB
+** GB | The unit in which the fields in this table are expressed | |
+|===
+
+[[RepeatableFileStoreStream]]
+=== Repeatable File Store Stream
+
+[%header,cols="20s,25a,30a,15a,10a"]
+|===
+| Field | Type | Description | Default Value | Required
+| In Memory Size a| Number | Maximum memory that the stream should use to keep data in memory. If more than that is consumed, content on the disk is buffered. | |
+| Buffer Unit a| Enumeration, one of:
+
+** BYTE
+** KB
+** MB
+** GB | The unit in which the *In Memory Size* value is expressed | |
+|===
+
+== See Also
+
+* xref:connectors::introduction/introduction-to-anypoint-connectors.adoc[Introduction to Anypoint Connectors]
+* xref:amazon-kinesis-connector-reference.adoc[Reference]
+* https://help.mulesoft.com[MuleSoft Help Center]
diff --git a/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-studio.adoc b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-studio.adoc
new file mode 100644
index 0000000000..0598600cd0
--- /dev/null
+++ b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-studio.adoc
@@ -0,0 +1,153 @@
+= Using Anypoint Studio to Configure Amazon Kinesis Data Streams Connector 2.0 - Mule 4
+
+Anypoint Studio (Studio) editors help you design and update your Mule applications, properties, and configuration files.
+
+To add and configure a connector in Studio:
+
+. <<create-mule-project,Create a Mule project>>.
+. <<add-connector-to-project,Add the connector to your Mule project>>.
+. <<add-connector-operation,Add a connector operation to the flow>>.
+. <<configure-global-element,Configure a global element for the connector>>.
+
+When you run the connector, you can view the app log to check for problems, as described in <<view-app-log,View the App Log>>.
+
+If you are new to configuring connectors in Studio, see xref:connectors::introduction/intro-config-use-studio.adoc[Using Anypoint Studio to Configure a Connector].
+
+[[create-mule-project]]
+== Create a Mule Project
+
+In Studio, create a new Mule project in which to add and configure the connector:
+
+. In Studio, select *File > New > Mule Project*.
+. Enter a name for your Mule project and click *Finish*.
+
+[[add-connector-to-project]]
+== Add the Connector to Your Mule Project
+
+Add Anypoint Connector for Amazon Kinesis Data Streams Connector (Amazon Kinesis Data Streams Connector) to your Mule project to automatically populate the XML code with the connector's namespace and schema location and add the required dependencies to the project's `pom.xml` file:
+
+. In the *Mule Palette* view, click *(X) Search in Exchange*.
+. In *Add Modules to Project*, type `kinesis` in the search field.
+. Click *Amazon Kinesis Data Streams Connector* in *Available modules*.
+. Click *Add*.
+. Click *Finish*.
+
+Adding a connector to a Mule project in Studio does not make that connector available to other projects in your Studio workspace.
+
+=== Configure a Source
+
+A source initiates a flow when a specified condition is met.
+You can configure one of these sources to use with Amazon Kinesis Data Streams Connector:
+
+* *Listener*
++
+Enables you to subscribe to a particular data stream. This source uses the Kinesis Client Library (KCL) to listen from a specified initial point in the data stream. It returns dynamically sized batches of records; however, you can also configure the maximum batch size.
++
+* *HTTP > Listener*
++
+Initiates a flow each time it receives a request on the configured host and port
++
+* *Scheduler*
++
+Initiates a flow when a time-based condition is met
+
+For example, to configure the Amazon Kinesis *Listener* source, follow these steps:
+
+. In the *Mule Palette* view, select *Kinesis*.
+. Drag *Listener* to the Studio canvas.
+. On the *Listener* properties window, optionally change the value of the *Display Name* field.
+. Click the plus sign (*+*) next to the *Connector configuration* field to configure a global element that can be used by all instances of the Amazon Kinesis Connector in the app.
+. On the *General* tab, complete the following fields:
++
+[%header,cols="30s,70a"]
+|===
+|Field a|Description
+|Name | Name used to reference this connector instance
+|Access Key | Access key provided by Amazon
+|Secret Key | Secret key provided by Amazon
+|Region Endpoint | URL entry point for a web service
+|Role | IAM role, if any
+|Dynamo DB Endpoint| URL to the AWS Dynamo DB
+|Cloud Watch Endpoint | URL to the AWS Cloud Watch service
+|===
++
+. On the *Security* tab, optionally specify the TLS information for the connector.
+. On the *Advanced* tab, optionally specify reconnection information, including a reconnection strategy.
+. Click *Test Connection* to confirm that Mule can connect with the specified server.
+. Click *OK*.
+. In the *Listener Params* section of the *Listener* properties window, optionally enter values for the fields.
+
+[[add-connector-operation]]
+== Add a Connector Operation to the Flow
+
+When you add a connector operation to your flow, you immediately define a specific operation for that connector to perform.
+
+You can use the following operations:
+
+* *Checkpoint*, which keeps track of the current position in the Kinesis data stream
+* *Seek*, which moves the reading position of the sources (listeners)
++
+While moving forward in the stream is fluent, moving backward causes an internal restart of the Kinesis Client Library (KCL) instance. Therefore, use this operation sparingly to move backward in a stream.
++
+* *Put Record*, which puts a record into the Kinesis data stream.
++
+If the *Aggregation Enabled* field in the global configuration *Advanced* tab is `true` (the default), then multiple records are combined and stored in one Kinesis Data Streams record.
++
+Records include binary data, a partition key, and an explicit hash key. The explicit hash key value is normally computed with the MD5 function of the record partition key, but you can override the value by using the *Explicit Hash Key* field. The operation uses the Kinesis Producer Library (KPL), which increases throughput.
+
+To add an operation for Amazon Kinesis Data Streams Connector, follow these steps:
+
+. In the *Mule Palette* view, select *Kinesis* and then select the desired operation.
+. Drag the operation onto the Studio canvas to the right of the source.
+
+[[configure-global-element]]
+== Configure a Global Element for the Connector
+
+When you configure a connector, it's best to configure a global element that all instances of that connector in the app can use. Configuring a global element requires you to provide the authentication credentials that the connector requires to access the target Amazon Kinesis Data Streams system.
+
+Amazon Kinesis Data Streams Connector supports Access Key authentication, which uses *Access Key* and *Secret Key* field values for AWS Identity and Access Management (IAM).
+
+When you configure a global element, you can reference a configuration file that contains ANT-style property placeholders (recommended), or you can enter your authorization credentials in the global configuration properties. For information about the benefits of using property placeholders and how to configure them, see xref:connectors::introduction/intro-connector-configuration-overview.adoc[Anypoint Connector Configuration].
+
+To configure a global element, follow these steps:
+
+. Select the name of the connector in the Studio canvas.
+. In the configuration screen for the operation, click the plus sign (*+*) next to the *Connector configuration* field to access the global element configuration fields.
+. On the *General* tab, configure the following fields.
++
+[%header,cols="30s,70a"]
+|===
+|Field |Description
+|Name | Name used to reference the connector instance
+|Access Key | Access key provided by Amazon
+|Secret Key | Secret key provided by Amazon
+|Region Endpoint | URL entry point for a web service
+|Role | IAM role, if any
+|Dynamo DB Endpoint| URL to the AWS Dynamo DB
+|Cloud Watch Endpoint | URL to the AWS Cloud Watch service
+|===
++
+. On the *Advanced* tab, optionally specify proxy, STS, Kinesis and reconnection information, including a reconnection strategy.
+. Click *Test Connection* to confirm that Mule can connect with the specified server.
+. Click *OK*.
+
+[[view-app-log]]
+
+== View the App Log
+
+To check for problems, you can view the app log as follows:
+
+* If you're running the app from Anypoint Platform, the app log output is visible in the Anypoint Studio console window.
+* If you're running the app using Mule from the command line, the app log output is visible in your OS console.
+
+Unless the log file path is customized in the app's log file (`log4j2.xml`), you can also view the app log in the default location `MULE_HOME/logs/<app-name>.log`. You can configure the location of the log path in the app log file `log4j2.xml`.
+
+== Next Step
+
+After you configure a global element and connection information, configure the other fields for the connector.
+
+== See Also
+
+* xref:connectors::introduction/introduction-to-anypoint-connectors.adoc[Introduction to Anypoint Connectors]
+* xref:amazon-kinesis-connector-reference.adoc[Reference]
+* https://help.mulesoft.com[MuleSoft Help Center]
diff --git a/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-xml-maven.adoc b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-xml-maven.adoc
new file mode 100644
index 0000000000..564b10998f
--- /dev/null
+++ b/amazon-kinesis/2.1/modules/ROOT/pages/amazon-kinesis-connector-xml-maven.adoc
@@ -0,0 +1,78 @@
+= Amazon Kinesis Data Streams Connector 2.0 XML and Maven Support - Mule 4
+ifndef::env-site,env-github[]
+include::_attributes.adoc[]
+endif::[]
+
+Although you can manually code a Mule app in XML, it is more efficient to use Anypoint Studio.
+
+If you manually code a Mule runtime engine (Mule) app in XML either from the Anypoint Studio XML editor or from a text editor, you can access the connector from your app by adding reference statements to both your XML Mule flow and the Apache Maven `pom.xml` file.
+
+When you add the connector in Studio, Studio automatically populates the XML code with the connector's namespace and schema location and it also adds a dependency to the `pom.xml` file.
+
+== Add a Namespace for the Connector
+
+Paste the following code inside the `mule` tag of the header of your configuration XML:
+
+[source,xml,linenums]
+----
+xmlns:amazon-kinesis="http://www.mulesoft.org/schema/mule/amazon-kinesis"
+xsi:schemaLocation="http://www.mulesoft.org/schema/mule/amazon-kinesis
+http://www.mulesoft.org/schema/mule/amazon-kinesis/current/mule-amazon-kinesis.xsd"
+----
+
+Add this dependency to your Maven `pom.xml` file:
+
+[source,xml,linenums]
+----
+<dependency>
+  <groupId>com.mulesoft.connectors</groupId>
+  <artifactId>amazon-kinesis-data-streams-connector</artifactId>
+  <version>x.x.x</version>
+  <classifier>mule-plugin</classifier>
+</dependency>
+----
+
+Replace `x.x.x` with the version that corresponds to the connector you are using.
+
+To obtain the most up-to-date `pom.xml` file information:
+
+. Go to https://www.mulesoft.com/exchange/[Anypoint Exchange].
+. In Exchange, click *Login* and supply your Anypoint Platform username and password.
+. In Exchange, search for `amazon kinesis`.
+. Select the connector.
+. Click *Dependency Snippets* near the upper right of the screen.
+
+== Next Step
+
+After completing your namespace and `pom.xml` file, you can try the xref:amazon-kinesis-connector-examples.adoc[Examples], which provides one or more use cases for the connector.
+
+== See Also
+
+* xref:connectors::introduction/introduction-to-anypoint-connectors.adoc[Introduction to Anypoint Connectors]
+* https://help.mulesoft.com[MuleSoft Help Center]
diff --git a/amazon-kinesis/2.1/modules/ROOT/pages/index.adoc b/amazon-kinesis/2.1/modules/ROOT/pages/index.adoc
new file mode 100644
index 0000000000..3970aa42cc
--- /dev/null
+++ b/amazon-kinesis/2.1/modules/ROOT/pages/index.adoc
@@ -0,0 +1,75 @@
+= Amazon Kinesis Data Streams Connector 2.0 - Mule 4
+
+
+
+Anypoint Connector for Amazon Kinesis Data Streams (Amazon Kinesis Data Streams Connector) provides access to standard Amazon Kinesis Data Stream operations using Anypoint Platform.
+
+Amazon Kinesis is a streaming solution built by Amazon AWS to collect and process large streams of data records in real time. To learn more, see the Amazon Kinesis official documentation.
+
+For compatibility information and fixed issues, see the Amazon Kinesis Data Streams Release Notes.
+
+NOTE: Amazon Kinesis Data Streams Connector doesn’t support Windows because it uses the Kinesis Producer Library (KPL), which deprecated Windows support starting in version 0.14.0.
+
+== Before You Begin
+
+To use this connector, you must be familiar with:
+
+* The Amazon Kinesis Data Streams API
+* Anypoint Connectors
+* Mule runtime engine (Mule)
+* Elements and global elements in a Mule flow
+* Creating a Mule app using Anypoint Studio (Studio)
+
+Before creating an app, you must have:
+
+* AWS Identity and Access Management (IAM) Access Key credentials with access rights to the Kinesis, DynamoDB, and CloudWatch services. You can find the required permissions https://docs.aws.amazon.com/streams/latest/dev/tutorial-stock-data-kplkcl-iam.html[here].
+* Access to the Amazon Kinesis target resource
+* Anypoint Studio version 4.3.0 or later
+
+== Common Use Cases for the Connector
+
+Common use cases for Kinesis Data Streams connector include the following:
+
+* Troubleshooting
++
+Collect log and event data from sources such as servers, desktops, and mobile devices. Then continuously process the data, generate metrics, power live dashboards, and put the aggregated data into data stores such as Amazon S3.
++
+* Real-time analytics
++
+Run real-time analytics on high-frequency event data such as sensor data or social media data collected by Amazon Kinesis Data Streams. These analytics enable you to gain insights from your data at a frequency of minutes instead of a frequency of hours or days.
++
+* Mobile data capture
++
+Have your mobile applications push data to Amazon Kinesis data streams from hundreds of thousands of devices, making the data available to you as soon as it is produced on the mobile devices.
+
+For examples, see xref:amazon-kinesis-connector-examples.adoc[Examples].
+
+== Audience
+
+* New users, read:
+** xref:amazon-kinesis-connector-studio.adoc[Using Anypoint Studio to Configure the Connector] to create your Mule app
+** xref:amazon-kinesis-connector-examples.adoc[Examples], which provide one or more use cases for the connector
+* Advanced users, read:
+** xref:amazon-kinesis-connector-xml-maven.adoc[XML and Maven Support]
+** xref:amazon-kinesis-connector-examples.adoc[Examples], which provides one or more use cases for the connector
+
+== Authentication Types
+
+Amazon Kinesis Data Streams Connector supports Access Key authentication. With this type of authentication, the connector authenticates to AWS by using an access key, secret key, and optionally, a role.
+
+For information about configuring authentication, see xref:amazon-kinesis-connector-studio.adoc[Using Anypoint Studio to Configure Amazon Kinesis Data Streams].
+
+== Using Exchange Templates and Examples
+
+https://www.mulesoft.com/exchange/[Anypoint Exchange] provides templates
+that you can use as starting points for your apps and examples that illustrate a complete solution.
+
+== Next Step
+
+After you complete the prerequisites and try the templates and examples, you are ready to create your own app and configure the connector using xref:amazon-kinesis-connector-studio.adoc[Anypoint Studio].
+
+== See Also
+
+* xref:connectors::introduction/introduction-to-anypoint-connectors.adoc[Introduction to Anypoint Connectors]
+* xref:connectors::introduction/intro-use-exchange.adoc[Use Exchange to Discover Connectors, Templates, and Examples]
+* https://help.mulesoft.com[MuleSoft Help Center]