From 41f8fb4b95355080ce24fb9378b9618de3d0fd75 Mon Sep 17 00:00:00 2001
From: AWS <>
Date: Wed, 27 Nov 2024 21:35:44 +0000
Subject: [PATCH 01/35] Update to next snapshot version: 2.29.24-SNAPSHOT
---
archetypes/archetype-app-quickstart/pom.xml | 2 +-
archetypes/archetype-lambda/pom.xml | 2 +-
archetypes/archetype-tools/pom.xml | 2 +-
archetypes/pom.xml | 2 +-
aws-sdk-java/pom.xml | 2 +-
bom-internal/pom.xml | 2 +-
bom/pom.xml | 2 +-
bundle-logging-bridge/pom.xml | 2 +-
bundle-sdk/pom.xml | 2 +-
bundle/pom.xml | 2 +-
codegen-lite-maven-plugin/pom.xml | 2 +-
codegen-lite/pom.xml | 2 +-
codegen-maven-plugin/pom.xml | 2 +-
codegen/pom.xml | 2 +-
core/annotations/pom.xml | 2 +-
core/arns/pom.xml | 2 +-
core/auth-crt/pom.xml | 2 +-
core/auth/pom.xml | 2 +-
core/aws-core/pom.xml | 2 +-
core/checksums-spi/pom.xml | 2 +-
core/checksums/pom.xml | 2 +-
core/crt-core/pom.xml | 2 +-
core/endpoints-spi/pom.xml | 2 +-
core/http-auth-aws-crt/pom.xml | 2 +-
core/http-auth-aws-eventstream/pom.xml | 2 +-
core/http-auth-aws/pom.xml | 2 +-
core/http-auth-spi/pom.xml | 2 +-
core/http-auth/pom.xml | 2 +-
core/identity-spi/pom.xml | 2 +-
core/imds/pom.xml | 2 +-
core/json-utils/pom.xml | 2 +-
core/metrics-spi/pom.xml | 2 +-
core/pom.xml | 2 +-
core/profiles/pom.xml | 2 +-
core/protocols/aws-cbor-protocol/pom.xml | 2 +-
core/protocols/aws-json-protocol/pom.xml | 2 +-
core/protocols/aws-query-protocol/pom.xml | 2 +-
core/protocols/aws-xml-protocol/pom.xml | 2 +-
core/protocols/pom.xml | 2 +-
core/protocols/protocol-core/pom.xml | 2 +-
core/protocols/smithy-rpcv2-protocol/pom.xml | 2 +-
core/regions/pom.xml | 2 +-
core/retries-spi/pom.xml | 2 +-
core/retries/pom.xml | 2 +-
core/sdk-core/pom.xml | 2 +-
http-client-spi/pom.xml | 2 +-
http-clients/apache-client/pom.xml | 2 +-
http-clients/aws-crt-client/pom.xml | 2 +-
http-clients/netty-nio-client/pom.xml | 2 +-
http-clients/pom.xml | 2 +-
http-clients/url-connection-client/pom.xml | 2 +-
.../cloudwatch-metric-publisher/pom.xml | 2 +-
metric-publishers/pom.xml | 2 +-
pom.xml | 4 +-
release-scripts/pom.xml | 2 +-
services-custom/dynamodb-enhanced/pom.xml | 2 +-
services-custom/iam-policy-builder/pom.xml | 2 +-
services-custom/pom.xml | 2 +-
.../s3-event-notifications/pom.xml | 2 +-
services-custom/s3-transfer-manager/pom.xml | 2 +-
services/accessanalyzer/pom.xml | 2 +-
services/account/pom.xml | 2 +-
services/acm/pom.xml | 2 +-
services/acmpca/pom.xml | 2 +-
services/amp/pom.xml | 2 +-
services/amplify/pom.xml | 2 +-
services/amplifybackend/pom.xml | 2 +-
services/amplifyuibuilder/pom.xml | 2 +-
services/apigateway/pom.xml | 2 +-
services/apigatewaymanagementapi/pom.xml | 2 +-
services/apigatewayv2/pom.xml | 2 +-
services/appconfig/pom.xml | 2 +-
services/appconfigdata/pom.xml | 2 +-
services/appfabric/pom.xml | 2 +-
services/appflow/pom.xml | 2 +-
services/appintegrations/pom.xml | 2 +-
services/applicationautoscaling/pom.xml | 2 +-
services/applicationcostprofiler/pom.xml | 2 +-
services/applicationdiscovery/pom.xml | 2 +-
services/applicationinsights/pom.xml | 2 +-
services/applicationsignals/pom.xml | 2 +-
services/appmesh/pom.xml | 2 +-
services/apprunner/pom.xml | 2 +-
services/appstream/pom.xml | 2 +-
services/appsync/pom.xml | 2 +-
services/apptest/pom.xml | 2 +-
services/arczonalshift/pom.xml | 2 +-
services/artifact/pom.xml | 2 +-
services/athena/pom.xml | 2 +-
services/auditmanager/pom.xml | 2 +-
services/autoscaling/pom.xml | 2 +-
services/autoscalingplans/pom.xml | 2 +-
services/b2bi/pom.xml | 2 +-
services/backup/pom.xml | 2 +-
services/backupgateway/pom.xml | 2 +-
services/batch/pom.xml | 2 +-
services/bcmdataexports/pom.xml | 2 +-
services/bcmpricingcalculator/pom.xml | 2 +-
services/bedrock/pom.xml | 2 +-
services/bedrockagent/pom.xml | 2 +-
services/bedrockagentruntime/pom.xml | 2 +-
services/bedrockruntime/pom.xml | 2 +-
services/billing/pom.xml | 2 +-
services/billingconductor/pom.xml | 2 +-
services/braket/pom.xml | 2 +-
services/budgets/pom.xml | 2 +-
services/chatbot/pom.xml | 2 +-
services/chime/pom.xml | 2 +-
services/chimesdkidentity/pom.xml | 2 +-
services/chimesdkmediapipelines/pom.xml | 2 +-
services/chimesdkmeetings/pom.xml | 2 +-
services/chimesdkmessaging/pom.xml | 2 +-
services/chimesdkvoice/pom.xml | 2 +-
services/cleanrooms/pom.xml | 2 +-
services/cleanroomsml/pom.xml | 2 +-
services/cloud9/pom.xml | 2 +-
services/cloudcontrol/pom.xml | 2 +-
services/clouddirectory/pom.xml | 2 +-
services/cloudformation/pom.xml | 2 +-
services/cloudfront/pom.xml | 2 +-
services/cloudfrontkeyvaluestore/pom.xml | 2 +-
services/cloudhsm/pom.xml | 2 +-
services/cloudhsmv2/pom.xml | 2 +-
services/cloudsearch/pom.xml | 2 +-
services/cloudsearchdomain/pom.xml | 2 +-
services/cloudtrail/pom.xml | 2 +-
services/cloudtraildata/pom.xml | 2 +-
services/cloudwatch/pom.xml | 2 +-
services/cloudwatchevents/pom.xml | 2 +-
services/cloudwatchlogs/pom.xml | 2 +-
services/codeartifact/pom.xml | 2 +-
services/codebuild/pom.xml | 2 +-
services/codecatalyst/pom.xml | 2 +-
services/codecommit/pom.xml | 2 +-
services/codeconnections/pom.xml | 2 +-
services/codedeploy/pom.xml | 2 +-
services/codeguruprofiler/pom.xml | 2 +-
services/codegurureviewer/pom.xml | 2 +-
services/codegurusecurity/pom.xml | 2 +-
services/codepipeline/pom.xml | 2 +-
services/codestarconnections/pom.xml | 2 +-
services/codestarnotifications/pom.xml | 2 +-
services/cognitoidentity/pom.xml | 2 +-
services/cognitoidentityprovider/pom.xml | 2 +-
services/cognitosync/pom.xml | 2 +-
services/comprehend/pom.xml | 2 +-
services/comprehendmedical/pom.xml | 2 +-
services/computeoptimizer/pom.xml | 2 +-
services/config/pom.xml | 2 +-
services/connect/pom.xml | 2 +-
services/connectcampaigns/pom.xml | 2 +-
services/connectcampaignsv2/pom.xml | 2 +-
services/connectcases/pom.xml | 2 +-
services/connectcontactlens/pom.xml | 2 +-
services/connectparticipant/pom.xml | 2 +-
services/controlcatalog/pom.xml | 2 +-
services/controltower/pom.xml | 2 +-
services/costandusagereport/pom.xml | 2 +-
services/costexplorer/pom.xml | 2 +-
services/costoptimizationhub/pom.xml | 2 +-
services/customerprofiles/pom.xml | 2 +-
services/databasemigration/pom.xml | 2 +-
services/databrew/pom.xml | 2 +-
services/dataexchange/pom.xml | 2 +-
services/datapipeline/pom.xml | 2 +-
services/datasync/pom.xml | 2 +-
services/datazone/pom.xml | 2 +-
services/dax/pom.xml | 2 +-
services/deadline/pom.xml | 2 +-
services/detective/pom.xml | 2 +-
services/devicefarm/pom.xml | 2 +-
services/devopsguru/pom.xml | 2 +-
services/directconnect/pom.xml | 2 +-
services/directory/pom.xml | 2 +-
services/directoryservicedata/pom.xml | 2 +-
services/dlm/pom.xml | 2 +-
services/docdb/pom.xml | 2 +-
services/docdbelastic/pom.xml | 2 +-
services/drs/pom.xml | 2 +-
services/dynamodb/pom.xml | 2 +-
services/ebs/pom.xml | 2 +-
services/ec2/pom.xml | 2 +-
services/ec2instanceconnect/pom.xml | 2 +-
services/ecr/pom.xml | 2 +-
services/ecrpublic/pom.xml | 2 +-
services/ecs/pom.xml | 2 +-
services/efs/pom.xml | 2 +-
services/eks/pom.xml | 2 +-
services/eksauth/pom.xml | 2 +-
services/elasticache/pom.xml | 2 +-
services/elasticbeanstalk/pom.xml | 2 +-
services/elasticinference/pom.xml | 2 +-
services/elasticloadbalancing/pom.xml | 2 +-
services/elasticloadbalancingv2/pom.xml | 2 +-
services/elasticsearch/pom.xml | 2 +-
services/elastictranscoder/pom.xml | 2 +-
services/emr/pom.xml | 2 +-
services/emrcontainers/pom.xml | 2 +-
services/emrserverless/pom.xml | 2 +-
services/entityresolution/pom.xml | 2 +-
services/eventbridge/pom.xml | 2 +-
services/evidently/pom.xml | 2 +-
services/finspace/pom.xml | 2 +-
services/finspacedata/pom.xml | 2 +-
services/firehose/pom.xml | 2 +-
services/fis/pom.xml | 2 +-
services/fms/pom.xml | 2 +-
services/forecast/pom.xml | 2 +-
services/forecastquery/pom.xml | 2 +-
services/frauddetector/pom.xml | 2 +-
services/freetier/pom.xml | 2 +-
services/fsx/pom.xml | 2 +-
services/gamelift/pom.xml | 2 +-
services/geomaps/pom.xml | 2 +-
services/geoplaces/pom.xml | 2 +-
services/georoutes/pom.xml | 2 +-
services/glacier/pom.xml | 2 +-
services/globalaccelerator/pom.xml | 2 +-
services/glue/pom.xml | 2 +-
services/grafana/pom.xml | 2 +-
services/greengrass/pom.xml | 2 +-
services/greengrassv2/pom.xml | 2 +-
services/groundstation/pom.xml | 2 +-
services/guardduty/pom.xml | 2 +-
services/health/pom.xml | 2 +-
services/healthlake/pom.xml | 2 +-
services/iam/pom.xml | 2 +-
services/identitystore/pom.xml | 2 +-
services/imagebuilder/pom.xml | 2 +-
services/inspector/pom.xml | 2 +-
services/inspector2/pom.xml | 2 +-
services/inspectorscan/pom.xml | 2 +-
services/internetmonitor/pom.xml | 2 +-
services/iot/pom.xml | 2 +-
services/iot1clickdevices/pom.xml | 2 +-
services/iot1clickprojects/pom.xml | 2 +-
services/iotanalytics/pom.xml | 2 +-
services/iotdataplane/pom.xml | 2 +-
services/iotdeviceadvisor/pom.xml | 2 +-
services/iotevents/pom.xml | 2 +-
services/ioteventsdata/pom.xml | 2 +-
services/iotfleethub/pom.xml | 2 +-
services/iotfleetwise/pom.xml | 2 +-
services/iotjobsdataplane/pom.xml | 2 +-
services/iotsecuretunneling/pom.xml | 2 +-
services/iotsitewise/pom.xml | 2 +-
services/iotthingsgraph/pom.xml | 2 +-
services/iottwinmaker/pom.xml | 2 +-
services/iotwireless/pom.xml | 2 +-
services/ivs/pom.xml | 2 +-
services/ivschat/pom.xml | 2 +-
services/ivsrealtime/pom.xml | 2 +-
services/kafka/pom.xml | 2 +-
services/kafkaconnect/pom.xml | 2 +-
services/kendra/pom.xml | 2 +-
services/kendraranking/pom.xml | 2 +-
services/keyspaces/pom.xml | 2 +-
services/kinesis/pom.xml | 2 +-
services/kinesisanalytics/pom.xml | 2 +-
services/kinesisanalyticsv2/pom.xml | 2 +-
services/kinesisvideo/pom.xml | 2 +-
services/kinesisvideoarchivedmedia/pom.xml | 2 +-
services/kinesisvideomedia/pom.xml | 2 +-
services/kinesisvideosignaling/pom.xml | 2 +-
services/kinesisvideowebrtcstorage/pom.xml | 2 +-
services/kms/pom.xml | 2 +-
services/lakeformation/pom.xml | 2 +-
services/lambda/pom.xml | 2 +-
services/launchwizard/pom.xml | 2 +-
services/lexmodelbuilding/pom.xml | 2 +-
services/lexmodelsv2/pom.xml | 2 +-
services/lexruntime/pom.xml | 2 +-
services/lexruntimev2/pom.xml | 2 +-
services/licensemanager/pom.xml | 2 +-
.../licensemanagerlinuxsubscriptions/pom.xml | 2 +-
.../licensemanagerusersubscriptions/pom.xml | 2 +-
services/lightsail/pom.xml | 2 +-
services/location/pom.xml | 2 +-
services/lookoutequipment/pom.xml | 2 +-
services/lookoutmetrics/pom.xml | 2 +-
services/lookoutvision/pom.xml | 2 +-
services/m2/pom.xml | 2 +-
services/machinelearning/pom.xml | 2 +-
services/macie2/pom.xml | 2 +-
services/mailmanager/pom.xml | 2 +-
services/managedblockchain/pom.xml | 2 +-
services/managedblockchainquery/pom.xml | 2 +-
services/marketplaceagreement/pom.xml | 2 +-
services/marketplacecatalog/pom.xml | 2 +-
services/marketplacecommerceanalytics/pom.xml | 2 +-
services/marketplacedeployment/pom.xml | 2 +-
services/marketplaceentitlement/pom.xml | 2 +-
services/marketplacemetering/pom.xml | 2 +-
services/marketplacereporting/pom.xml | 2 +-
services/mediaconnect/pom.xml | 2 +-
services/mediaconvert/pom.xml | 2 +-
services/medialive/pom.xml | 2 +-
services/mediapackage/pom.xml | 2 +-
services/mediapackagev2/pom.xml | 2 +-
services/mediapackagevod/pom.xml | 2 +-
services/mediastore/pom.xml | 2 +-
services/mediastoredata/pom.xml | 2 +-
services/mediatailor/pom.xml | 2 +-
services/medicalimaging/pom.xml | 2 +-
services/memorydb/pom.xml | 2 +-
services/mgn/pom.xml | 2 +-
services/migrationhub/pom.xml | 2 +-
services/migrationhubconfig/pom.xml | 2 +-
services/migrationhuborchestrator/pom.xml | 2 +-
services/migrationhubrefactorspaces/pom.xml | 2 +-
services/migrationhubstrategy/pom.xml | 2 +-
services/mq/pom.xml | 2 +-
services/mturk/pom.xml | 2 +-
services/mwaa/pom.xml | 2 +-
services/neptune/pom.xml | 2 +-
services/neptunedata/pom.xml | 2 +-
services/neptunegraph/pom.xml | 2 +-
services/networkfirewall/pom.xml | 2 +-
services/networkmanager/pom.xml | 2 +-
services/networkmonitor/pom.xml | 2 +-
services/notifications/pom.xml | 2 +-
services/notificationscontacts/pom.xml | 2 +-
services/oam/pom.xml | 2 +-
services/observabilityadmin/pom.xml | 2 +-
services/omics/pom.xml | 2 +-
services/opensearch/pom.xml | 2 +-
services/opensearchserverless/pom.xml | 2 +-
services/opsworks/pom.xml | 2 +-
services/opsworkscm/pom.xml | 2 +-
services/organizations/pom.xml | 2 +-
services/osis/pom.xml | 2 +-
services/outposts/pom.xml | 2 +-
services/panorama/pom.xml | 2 +-
services/partnercentralselling/pom.xml | 2 +-
services/paymentcryptography/pom.xml | 2 +-
services/paymentcryptographydata/pom.xml | 2 +-
services/pcaconnectorad/pom.xml | 2 +-
services/pcaconnectorscep/pom.xml | 2 +-
services/pcs/pom.xml | 2 +-
services/personalize/pom.xml | 2 +-
services/personalizeevents/pom.xml | 2 +-
services/personalizeruntime/pom.xml | 2 +-
services/pi/pom.xml | 2 +-
services/pinpoint/pom.xml | 2 +-
services/pinpointemail/pom.xml | 2 +-
services/pinpointsmsvoice/pom.xml | 2 +-
services/pinpointsmsvoicev2/pom.xml | 2 +-
services/pipes/pom.xml | 2 +-
services/polly/pom.xml | 2 +-
services/pom.xml | 2 +-
services/pricing/pom.xml | 2 +-
services/privatenetworks/pom.xml | 2 +-
services/proton/pom.xml | 2 +-
services/qapps/pom.xml | 2 +-
services/qbusiness/pom.xml | 2 +-
services/qconnect/pom.xml | 2 +-
services/qldb/pom.xml | 2 +-
services/qldbsession/pom.xml | 2 +-
services/quicksight/pom.xml | 2 +-
services/ram/pom.xml | 2 +-
services/rbin/pom.xml | 2 +-
services/rds/pom.xml | 2 +-
services/rdsdata/pom.xml | 2 +-
services/redshift/pom.xml | 2 +-
services/redshiftdata/pom.xml | 2 +-
services/redshiftserverless/pom.xml | 2 +-
services/rekognition/pom.xml | 2 +-
services/repostspace/pom.xml | 2 +-
services/resiliencehub/pom.xml | 2 +-
services/resourceexplorer2/pom.xml | 2 +-
services/resourcegroups/pom.xml | 2 +-
services/resourcegroupstaggingapi/pom.xml | 2 +-
services/robomaker/pom.xml | 2 +-
services/rolesanywhere/pom.xml | 2 +-
services/route53/pom.xml | 2 +-
services/route53domains/pom.xml | 2 +-
services/route53profiles/pom.xml | 2 +-
services/route53recoverycluster/pom.xml | 2 +-
services/route53recoverycontrolconfig/pom.xml | 2 +-
services/route53recoveryreadiness/pom.xml | 2 +-
services/route53resolver/pom.xml | 2 +-
services/rum/pom.xml | 2 +-
services/s3/pom.xml | 2 +-
services/s3control/pom.xml | 2 +-
services/s3outposts/pom.xml | 2 +-
services/sagemaker/pom.xml | 2 +-
services/sagemakera2iruntime/pom.xml | 2 +-
services/sagemakeredge/pom.xml | 2 +-
services/sagemakerfeaturestoreruntime/pom.xml | 2 +-
services/sagemakergeospatial/pom.xml | 2 +-
services/sagemakermetrics/pom.xml | 2 +-
services/sagemakerruntime/pom.xml | 2 +-
services/savingsplans/pom.xml | 2 +-
services/scheduler/pom.xml | 2 +-
services/schemas/pom.xml | 2 +-
services/secretsmanager/pom.xml | 2 +-
services/securityhub/pom.xml | 2 +-
services/securitylake/pom.xml | 2 +-
.../serverlessapplicationrepository/pom.xml | 2 +-
services/servicecatalog/pom.xml | 2 +-
services/servicecatalogappregistry/pom.xml | 2 +-
services/servicediscovery/pom.xml | 2 +-
services/servicequotas/pom.xml | 2 +-
services/ses/pom.xml | 2 +-
services/sesv2/pom.xml | 2 +-
services/sfn/pom.xml | 2 +-
services/shield/pom.xml | 2 +-
services/signer/pom.xml | 2 +-
services/simspaceweaver/pom.xml | 2 +-
services/sms/pom.xml | 2 +-
services/snowball/pom.xml | 2 +-
services/snowdevicemanagement/pom.xml | 2 +-
services/sns/pom.xml | 2 +-
services/socialmessaging/pom.xml | 2 +-
services/sqs/pom.xml | 2 +-
services/ssm/pom.xml | 2 +-
services/ssmcontacts/pom.xml | 2 +-
services/ssmincidents/pom.xml | 2 +-
services/ssmquicksetup/pom.xml | 2 +-
services/ssmsap/pom.xml | 2 +-
services/sso/pom.xml | 2 +-
services/ssoadmin/pom.xml | 2 +-
services/ssooidc/pom.xml | 2 +-
services/storagegateway/pom.xml | 2 +-
services/sts/pom.xml | 2 +-
services/supplychain/pom.xml | 2 +-
services/support/pom.xml | 2 +-
services/supportapp/pom.xml | 2 +-
services/swf/pom.xml | 2 +-
services/synthetics/pom.xml | 2 +-
services/taxsettings/pom.xml | 2 +-
services/textract/pom.xml | 2 +-
services/timestreaminfluxdb/pom.xml | 2 +-
services/timestreamquery/pom.xml | 2 +-
services/timestreamwrite/pom.xml | 2 +-
services/tnb/pom.xml | 2 +-
services/transcribe/pom.xml | 2 +-
services/transcribestreaming/pom.xml | 2 +-
services/transfer/pom.xml | 2 +-
services/translate/pom.xml | 2 +-
services/trustedadvisor/pom.xml | 2 +-
services/verifiedpermissions/pom.xml | 2 +-
services/voiceid/pom.xml | 2 +-
services/vpclattice/pom.xml | 2 +-
services/waf/pom.xml | 2 +-
services/wafv2/pom.xml | 2 +-
services/wellarchitected/pom.xml | 2 +-
services/wisdom/pom.xml | 2 +-
services/workdocs/pom.xml | 2 +-
services/workmail/pom.xml | 2 +-
services/workmailmessageflow/pom.xml | 2 +-
services/workspaces/pom.xml | 2 +-
services/workspacesthinclient/pom.xml | 2 +-
services/workspacesweb/pom.xml | 2 +-
services/xray/pom.xml | 2 +-
test/auth-tests/pom.xml | 2 +-
.../pom.xml | 2 +-
test/bundle-shading-tests/pom.xml | 2 +-
test/codegen-generated-classes-test/pom.xml | 2 +-
test/crt-unavailable-tests/pom.xml | 2 +-
test/http-client-tests/pom.xml | 2 +-
test/module-path-tests/pom.xml | 2 +-
.../pom.xml | 2 +-
test/protocol-tests-core/pom.xml | 2 +-
test/protocol-tests/pom.xml | 2 +-
test/region-testing/pom.xml | 2 +-
test/ruleset-testing-core/pom.xml | 2 +-
test/s3-benchmarks/pom.xml | 2 +-
test/sdk-benchmarks/pom.xml | 2 +-
test/sdk-native-image-test/pom.xml | 2 +-
test/service-test-utils/pom.xml | 2 +-
test/stability-tests/pom.xml | 2 +-
test/test-utils/pom.xml | 2 +-
test/tests-coverage-reporting/pom.xml | 2 +-
test/v2-migration-tests/pom.xml | 2 +-
third-party/pom.xml | 2 +-
third-party/third-party-jackson-core/pom.xml | 2 +-
.../pom.xml | 2 +-
third-party/third-party-slf4j-api/pom.xml | 2 +-
utils/pom.xml | 2 +-
v2-migration/pom.xml | 2 +-
.../rewrite/upgrade-sdk-dependencies.yml | 766 +++++++++---------
482 files changed, 865 insertions(+), 865 deletions(-)
diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml
index 4ec79327392..e11d3a1d3cd 100644
--- a/archetypes/archetype-app-quickstart/pom.xml
+++ b/archetypes/archetype-app-quickstart/pom.xml
@@ -20,7 +20,7 @@
Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type: This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service. Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type: This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service. Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account. This operation applies only to policy types other than service control policies (SCPs). For more information about policy inheritance, see Understanding management policy inheritance in the Organizations User Guide. This operation can be called from any account in the organization. Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account. This operation applies only to management policies. It does not apply to authorization policies: service control policies (SCPs) and resource control policies (RCPs). For more information about policy inheritance, see Understanding management policy inheritance in the Organizations User Guide. This operation can be called from any account in the organization. The type of policy to create. You can specify one of the following values: The type of policy to create. You can specify one of the following values: The type of policy that you want information about. You can specify one of the following values: The type of policy that you want information about. You can specify one of the following values: The policy type that you want to disable in this root. You can specify one of the following values: The policy type that you want to disable in this root. You can specify one of the following values: The policy type that you want to enable. You can specify one of the following values: The policy type that you want to enable. You can specify one of the following values: The type of policy that you want to include in the returned list. You must specify one of the following values: The type of policy that you want to include in the returned list. You must specify one of the following values: Specifies the type of policy that you want to include in the response. You must specify one of the following values: Specifies the type of policy that you want to include in the response. You must specify one of the following values: Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the Creates a web app based on specified parameters, and returns the ID for the new web app. Deletes the user belonging to a file transfer protocol-enabled server you specify. No response returns from this operation. 
When you delete a user from a server, the user's information is lost. Deletes the specified web app. Deletes the Describes the user assigned to the specific file transfer protocol-enabled server, as identified by its The response from this call returns the properties of the user associated with the Describes the web app that's identified by Describes the web app customization object that's identified by Lists the users for a file transfer protocol-enabled server that you specify by passing the Lists all web apps associated with your Amazon Web Services account for your current region. Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the The response returns the In the console, you can select Restricted when you create or update a user. This ensures that the user can't access anything outside of their home directory. The programmatic way to configure this behavior is to update the user. Set their For example, if the user's home directory is Assigns new properties to a web app. You can modify the access point, identity provider details, and the web app units. Assigns new customization properties to a web app. You can modify the icon file, logo file, and title. Required when Required when You can provide a structure that contains the details for the identity provider to use with your web app. The A union that contains the value for number of concurrent connections or the user sessions on your web app. Key-value pairs that can be used to group and search for web apps. Returns a unique identifier for the web app. Provide the unique identifier for the web app that contains the customizations that you are deleting. Provide the unique identifier for the web app that you are deleting. Provide the unique identifier for the web app. Returns a structure that contains the details of the web app customizations. Provide the unique identifier for the web app. Returns a structure that contains the details of the web app. The details for a server host key. The Amazon Resource Name (ARN) for the IAM Identity Center application: this value is set automatically when you create your web app. The Amazon Resource Name (ARN) for the IAM Identity Center used for the web app. The IAM role in IAM Identity Center used for the web app. A structure that contains the details of the IAM Identity Center used for your web app. Returned during a call to Describes the properties of a user that was specified. The Amazon Resource Name (ARN) of the web app. The unique identifier for the web app. A structure that contains the details for the identity provider used by the web app. The The A union that contains the value for number of concurrent connections or the user sessions on your web app. Key-value pairs that can be used to group and search for web apps. Tags are metadata attached to web apps for any purpose. A structure that describes the parameters for the web app, as identified by the Returns the Amazon Resource Name (ARN) for the web app. Returns the unique identifier for your web app. Returns the page title that you defined for your web app. Returns a logo file data string (in base64 encoding). Returns a icon file data string (in base64 encoding). A structure that contains the customization fields for the web app. You can provide a title, logo, and icon to customize the appearance of your web app. Returns a structure for your identity provider details. This structure contains the instance ARN and role being used for the web app. 
Returns a structure that contains the identity provider details for your web app. The Amazon Resource Name (ARN) for the IAM Identity Center used for the web app. The IAM role in IAM Identity Center used for the web app. A structure that describes the values to use for the IAM Identity Center settings when you create or update a web app. Specifies the maximum number of access SIDs to return. The maximum number of items to return. The maximum number of agreements to return. The maximum number of items to return. The maximum number of certificates to return. The maximum number of items to return. The maximum number of connectors to return. The maximum number of items to return. Specifies the maximum number of executions to return. The maximum number of items to return. The maximum number of host keys to return. The maximum number of items to return. The maximum number of profiles to return. The maximum number of items to return. The maximum number of items to return. Returns the Provide this value for the Returns, for each listed web app, a structure that contains details for the web app. Specifies the maximum number of workflows to return. The maximum number of items to return. The Amazon Resource Name (ARN) for the web app. The unique identifier for the web app. The The a structure that contains details for the web app. Provide the identifier of the web app that you are updating. Provide an updated title. Specify logo file data string (in base64 encoding). Specify icon file data string (in base64 encoding). Returns the unique identifier for the web app being updated. The IAM role used to access IAM Identity Center. A structure that describes the values to use for the IAM Identity Center settings when you update a web app. A structure that describes the values to use for the IAM Identity Center settings when you update a web app. A union that contains the Provide the identifier of the web app that you are updating. Provide updated identity provider values in a The A union that contains the value for number of concurrent connections or the user sessions on your web app. Returns the unique identifier for the web app being updated. A structure that describes the values to use for the IAM Identity Center settings when you create a web app. A union that contains the An integer that represents the number of units for your desired number of concurrent connections, or the number of user sessions on your web app at the same time. Each increment allows an additional 250 concurrent sessions: a value of Contains an integer value that represents the value for number of concurrent connections or the user sessions on your web app. Creates a new direct-query data source to the specified domain. For more information, see Creating Amazon OpenSearch Service data source integrations with Amazon S3. Adds a new data source in Amazon OpenSearch Service so that you can perform direct queries on external data. Attaches tags to an existing Amazon OpenSearch Service domain. Tags are a set of case-sensitive key-value pairs. A domain can have up to 10 tags. For more information, see Tagging Amazon OpenSearch Service domains. Attaches tags to an existing Amazon OpenSearch Service domain, data source, or application. Tags are a set of case-sensitive key-value pairs. A domain, data source, or application can have up to 10 tags. For more information, see Tagging Amazon OpenSearch Service resources. Deletes a direct-query data source. 
For more information, see Deleting an Amazon OpenSearch Service data source with Amazon S3. Deletes a previously configured direct query data source from Amazon OpenSearch Service. Retrieves information about a direct query data source. Returns detailed configuration information for a specific direct query data source in Amazon OpenSearch Service. Lists direct-query data sources for a specific domain. For more information, see For more information, see Working with Amazon OpenSearch Service direct queries with Amazon S3. Lists an inventory of all the direct query data sources that you have configured within Amazon OpenSearch Service. Returns all resource tags for an Amazon OpenSearch Service domain. For more information, see Tagging Amazon OpenSearch Service domains. Returns all resource tags for an Amazon OpenSearch Service domain, data source, or application. For more information, see Tagging Amazon OpenSearch Service resources. Removes the specified set of tags from an Amazon OpenSearch Service domain. For more information, see Tagging Amazon OpenSearch Service domains. Removes the specified set of tags from an Amazon OpenSearch Service domain, data source, or application. For more information, see Tagging Amazon OpenSearch Service resources. Updates a direct-query data source. For more information, see Working with Amazon OpenSearch Service data source integrations with Amazon S3. Updates the configuration or properties of an existing direct query data source in Amazon OpenSearch Service. The result of an A unique, user-defined label to identify the data source within your OpenSearch Service environment. The supported Amazon Web Services service that you want to use as the source for direct queries in OpenSearch Service. An optional text field for providing additional context and details about the data source. A list of Amazon Resource Names (ARNs) for the OpenSearch collections that are associated with the direct query data source. The unique, system-generated identifier that represents the data source. Amazon Resource Name (ARN) for the OpenSearch Service domain to which you want to attach resource tags. Amazon Resource Name (ARN) for the OpenSearch Service domain, data source, or application to which you want to attach resource tags. List of resource tags. Container for the parameters to the Container for the parameters to the The unique identifier of the IAM role that grants OpenSearch Service permission to access the specified data source. Configuration details for a CloudWatch Logs data source that can be used for direct queries. ARN of the Cloudwatch log group to publish logs to. The result of a A unique, user-defined label to identify the data source within your OpenSearch Service environment. A unique, user-defined label to identify the data source within your OpenSearch Service environment. The supported Amazon Web Services service that is used as the source for direct queries in OpenSearch Service. A description that provides additional context and details about the data source. A list of Amazon Resource Names (ARNs) for the OpenSearch collections that are associated with the direct query data source. The unique, system-generated identifier that represents the data source. A list of tags attached to a direct query data source. The configuration details for a data source that can be directly queried. Specifies CloudWatch Logs as a type of data source for direct queries. Specifies Security Lake as a type of data source for direct queries. 
The type of data source that is used for direct queries. This is a supported Amazon Web Services service, such as CloudWatch Logs or Security Lake. The result of a A unique, user-defined label that identifies the data source within your OpenSearch Service environment. A unique, user-defined label to identify the data source within your OpenSearch Service environment. The supported Amazon Web Services service that is used as the source for direct queries in OpenSearch Service. A description that provides additional context and details about the data source. A list of Amazon Resource Names (ARNs) for the OpenSearch collections that are associated with the direct query data source. The unique, system-generated identifier that represents the data source. The result of a A list of the direct query data sources that are returned by the Amazon Resource Name (ARN) for the domain to view tags for. Amazon Resource Name (ARN) for the domain, data source, or application to view tags for. List of resource tags associated with the specified domain. List of resource tags associated with the specified domain, data source, or application. The results of a The Amazon Resource Name (ARN) of the domain from which you want to delete the specified tags. The Amazon Resource Name (ARN) of the domain, data source, or application from which you want to delete the specified tags. The list of tag keys to remove from the domain. The list of tag keys to remove from the domain, data source, or application. Container for the request parameters to the The unique identifier of the IAM role that grants OpenSearch Service permission to access the specified data source. Configuration details for a Security Lake data source that can be used for direct queries. The result of an A unique, user-defined label to identify the data source within your OpenSearch Service environment. The supported Amazon Web Services service that you want to use as the source for direct queries in OpenSearch Service. An optional text field for providing additional context and details about the data source. A list of Amazon Resource Names (ARNs) for the OpenSearch collections that are associated with the direct query data source. The unique, system-generated identifier that represents the data source. Creates and Amazon Q in Connect AI Agent version. Creates an Amazon Q in Connect AI Guardrail. Creates an Amazon Q in Connect AI Guardrail version. Deletes an Amazon Q in Connect AI Agent Version. Deletes an Amazon Q in Connect AI Guardrail. Delete and Amazon Q in Connect AI Guardrail version. Gets an Amazon Q in Connect AI Agent. Gets the Amazon Q in Connect AI Guardrail. Retrieves the Amazon Q in Connect message template. The message template identifier can contain an optional qualifier, for example, Retrieves next message on an Amazon Q in Connect session. Lists AI Agents. Lists AI Guardrail versions. Lists the AI Guardrails available on the Amazon Q in Connect assistant. Lists all the available Amazon Q in Connect message templates for the specified knowledge base. Lists messages on an Amazon Q in Connect session. Searches for sessions. Submits a message to the Amazon Q in Connect session. Updates an AI Agent. Updates an AI Guardrail. The configuration for AI Agents of type The configuration for AI Agents of type SELF_SERVICE. A typed union that specifies the configuration based on the type of AI Agent. The summary of the AI Agent version. 
Messaging for when violations are detected in text Contains the type of the content filter and how strongly it should apply to prompts and model responses. Contains details about how to handle harmful content. The filter configuration details for the AI Guardrails contextual grounding policy. The policy configuration details for the AI Guardrail's contextual grounding policy. The Amazon Resource Name (ARN) of the AI Prompt. The Amazon Resource Name (ARN) of the AI Guardrail. The identifier of the Amazon Q in Connect AI prompt. The API format used for this AI Prompt. The identifier of the Amazon Q in Connect AI Guardrail. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The description of the AI Prompt. The message to return when the AI Guardrail blocks a prompt. The identifier of the model used for this AI Prompt. Model Ids supported are: The message to return when the AI Guardrail blocks a model response. Contains details about how to handle harmful content. The policy configuration details for the AI Guardrail's contextual grounding policy. A description of the AI Guardrail. The time the AI Prompt was last modified. The time the AI Guardrail was last modified. The name of the AI Prompt The name of the AI Guardrail. The origin of the AI Prompt. Contains details about PII entities and regular expressions to configure for the AI Guardrail. The status of the AI Prompt. The status of the AI Guardrail. The tags used to organize, track, or control access for this resource. The configuration of the prompt template for this AI Prompt. The type of the prompt template for this AI Prompt. The type of this AI Prompt. Contains details about topics that the AI Guardrail should identify and deny. The visibility status of the AI Prompt. The visibility status of the AI Guardrail. Contains details about the word policy to configured for the AI Guardrail. The data for the AI Prompt The data for the AI Guardrail Description of the guardrail or its version A list of PII entities to configure to the AI Guardrail. A list of regular expressions to configure to the AI Guardrail. Contains details about PII entities and regular expressions to configure for the AI Guardrail. The Amazon Resource Name (ARN) of the AI Prompt. The Amazon Resource Name (ARN) of the AI Guardrail. The identifier of the Amazon Q in Connect AI prompt. The API format used for this AI Prompt. The identifier of the Amazon Q in Connect AI Guardrail. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The description of the AI Prompt. The identifier of the model used for this AI Prompt. Model Ids supported are: A description of the AI Guardrail. The time the AI Prompt was last modified. The time the AI Guardrail was last modified. The name of the AI Prompt. The origin of the AI Prompt. The name of the AI Guardrail. The status of the AI Prompt. The status of the AI Guardrail. The tags used to organize, track, or control access for this resource. The type of the prompt template for this AI Prompt. The type of this AI Prompt. The visibility status of the AI Prompt. The visibility status of the AI Guardrail. The summary of the AI Prompt. The summary of the AI Guardrail. The configuration for a prompt template that supports full textual prompt configuration using a YAML prompt. A list of policies related to topics that the AI Guardrail should deny. 
A typed union that specifies the configuration for a prompt template based on its type. Contains details about topics that the AI Guardrail should identify and deny. The data for the summary of the AI Guardrail version. The version number for this AI Guardrail version. The summary of the AI Guardrail version. A list of managed words to configure for the AI Guardrail. A list of words to configure for the AI Guardrail. Contains details about the word policy to configured for the AI Guardrail. The Amazon Resource Name (ARN) of the AI Prompt. The identifier of the Amazon Q in Connect AI prompt. The API format used for this AI Prompt. The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The description of the AI Prompt. The identifier of the model used for this AI Prompt. Model Ids supported are: The time the AI Prompt was last modified. The name of the AI Prompt The origin of the AI Prompt. The status of the AI Prompt. The tags used to organize, track, or control access for this resource. The configuration of the prompt template for this AI Prompt. The type of the prompt template for this AI Prompt. The type of this AI Prompt. The visibility status of the AI Prompt. The data for the AI Prompt The Amazon Resource Name (ARN) of the AI Prompt. The identifier of the Amazon Q in Connect AI prompt. The API format used for this AI Prompt. The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The description of the AI Prompt. The identifier of the model used for this AI Prompt. Model Ids supported are: The time the AI Prompt was last modified. The name of the AI Prompt. The origin of the AI Prompt. The status of the AI Prompt. The tags used to organize, track, or control access for this resource. The type of the prompt template for this AI Prompt. The type of this AI Prompt. The visibility status of the AI Prompt. The summary of the AI Prompt. The configuration for a prompt template that supports full textual prompt configuration using a YAML prompt. A typed union that specifies the configuration for a prompt template based on its type. The AI Guardrail identifier for the Answer Generation Guardrail used by the The AI Prompt identifier for the Answer Generation prompt used by the The self service conversation history before the Amazon Q in Connect session. The conversation context to include in SendMessage. The reason of the conversation state. The status of the conversation state. The conversation state associated to a message. The API Format of the AI Prompt. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The message to return when the AI Guardrail blocks a prompt. The message to return when the AI Guardrail blocks a model response. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.. The description of the AI Prompt. The content filter policies to configure for the AI Guardrail. The identifier of the model used for this AI Prompt. Model Ids supported are: The contextual grounding policy configuration used to create an AI Guardrail. A description of the AI Guardrail. 
The name of the AI Prompt. The name of the AI Guardrail. The sensitive information policy to configure for the AI Guardrail. The tags used to organize, track, or control access for this resource. The configuration of the prompt template for this AI Prompt. The topic policies to configure for the AI Guardrail. The type of the prompt template for this AI Prompt. The visibility status of the AI Guardrail. The word policy you configure for the AI Guardrail. The data of the AI Guardrail. The identifier of the Amazon Q in Connect AI Guardrail. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.. The time the AI Guardrail was last modified. The data of the AI Guardrail version. The version number of the AI Guardrail version. The API Format of the AI Prompt. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.. The description of the AI Prompt. The identifier of the model used for this AI Prompt. Model Ids supported are: The name of the AI Prompt. The tags used to organize, track, or control access for this resource. The configuration of the prompt template for this AI Prompt. The type of the prompt template for this AI Prompt. The identifier of the Amazon Q in Connect AI Guardrail. Can be either the ID or the ARN. URLs cannot contain the ARN. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The identifier of the Amazon Q in Connect AI Guardrail. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The version number of the AI Guardrail version to be deleted. The identifier of the Amazon Q in Connect AI Guardrail. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The data of the AI Guardrail. The version number of the AI Guardrail version (returned if an AI Guardrail version was specified via use of a qualifier for the The identifier of the Amazon Q in Connect assistant. The token for the next message. Use the value returned in the SendMessage or previous response in the next request to retrieve the next message. The identifier of the Amazon Q in Connect session. The conversation data stored on an Amazon Q in Connect Session. The state of current conversation. The token for the next message. The identifier of the submitted message. The message response to the requested message. The type of message response. The strength of the content filter to apply to prompts. As you increase the filter strength, the likelihood of filtering harmful content increases and the probability of seeing harmful content in your application reduces. The strength of the content filter to apply to model responses. As you increase the filter strength, the likelihood of filtering harmful content increases and the probability of seeing harmful content in your application reduces. 
The harmful category that the content filter is applied to. Contains filter strengths for harmful content. AI Guardrail's support the following content filters to detect and filter harmful user inputs and FM-generated outputs. Hate: Describes input prompts and model responses that discriminate, criticize, insult, denounce, or dehumanize a person or group on the basis of an identity (such as race, ethnicity, gender, religion, sexual orientation, ability, and national origin). Insults: Describes input prompts and model responses that includes demeaning, humiliating, mocking, insulting, or belittling language. This type of language is also labeled as bullying. Sexual: Describes input prompts and model responses that indicates sexual interest, activity, or arousal using direct or indirect references to body parts, physical traits, or sex. Violence: Describes input prompts and model responses that includes glorification of, or threats to inflict physical pain, hurt, or injury toward a person, group, or thing. Content filtering depends on the confidence classification of user inputs and FM responses across each of the four harmful categories. All input and output statements are classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each harmful category. For example, if a statement is classified as Hate with HIGH confidence, the likelihood of the statement representing hateful content is high. A single statement can be classified across multiple categories with varying confidence levels. For example, a single statement can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence. Type of text to text filter in content policy List of content filter configs in content policy. The threshold details for the AI Guardrail's contextual grounding filter. The filter type for the AI Guardrail's contextual grounding filter. The filter configuration details for the AI Guardrail's contextual grounding filter. The threshold for this filter. Type of contextual grounding filter List of contextual grounding filter configs. Strength for filters A config for the list of managed words. The managed word type to configure for the AI Guardrail. The managed word list to configure for the AI Guardrail. Options for managed words. List of entities. Configure AI Guardrail's action when the PII entity is detected. Configure AI Guardrail type when the PII entity is detected. The following PIIs are used to block or mask sensitive information: General ADDRESS A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood. AGE An individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age. NAME An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses. For example, AI Guardrail recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address. EMAIL An email address, such as marymajor@email.com. PHONE A phone number. This entity type also includes fax and pager numbers. USERNAME A user name that identifies an account, such as a login name, screen name, nick name, or handle. 
PASSWORD An alphanumeric string that is used as a password, such as \"* very20special#pass*\". DRIVER_ID The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters. LICENSE_PLATE A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country. VEHICLE_IDENTIFICATION_NUMBER A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs. Finance REDIT_DEBIT_CARD_CVV A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code. CREDIT_DEBIT_CARD_EXPIRY The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021. CREDIT_DEBIT_CARD_NUMBER The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present. PIN A four-digit personal identification number (PIN) with which you can access your bank account. INTERNATIONAL_BANK_ACCOUNT_NUMBER An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure. SWIFT_CODE A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office. IT IP_ADDRESS An IPv4 address, such as 198.51.100.0. MAC_ADDRESS A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC). URL A web address, such as www.example.com. AWS_ACCESS_KEY A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. AWS_SECRET_KEY A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. USA specific US_BANK_ACCOUNT_NUMBER A US bank account number, which is typically 10 to 12 digits long. US_BANK_ROUTING_NUMBER A US bank account routing number. These are typically nine digits long, US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits. US_PASSPORT_NUMBER A US passport number. Passport numbers range from six to nine alphanumeric characters. US_SOCIAL_SECURITY_NUMBER A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents. 
Canada specific CA_HEALTH_NUMBER A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits. CA_SOCIAL_INSURANCE_NUMBER A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits. The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm . UK Specific UK_NATIONAL_HEALTH_SERVICE_NUMBER A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum. UK_NATIONAL_INSURANCE_NUMBER A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits. UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business. Custom Regex filter - You can use a regular expressions to define patterns for an AI Guardrail to recognize and act upon such as serial number, booking ID etc.. The PII entity to configure for the AI Guardrail. The currently supported PII entities The AI Guardrail action to configure when matching regular expression is detected. The description of the regular expression to configure for the AI Guardrail. The name of the regular expression to configure for the AI Guardrail. The regular expression pattern to configure for the AI Guardrail. The regular expression to configure for the AI Guardrail. The regex description. The regex name. The regex pattern. List of regex. Options for sensitive information action. A definition of the topic to deny. A list of prompts, each of which is an example of a prompt that can be categorized as belonging to the topic. The name of the topic to deny. Specifies to deny the topic. Details about topics for the AI Guardrail to identify and deny. Definition of topic in topic policy Text example in topic policy List of text examples Name of topic in topic policy Type of topic in a policy List of topic configs in topic policy. Text of the word configured for the AI Guardrail to block. A word to configure for the AI Guardrail. The custom word text. List of custom word configs. Contains details about how to ingest the documents in a data source. Summary information about the knowledge base. Summary information about the knowledge base. The identifier of the Amazon Q in Connect AI Agent for which versions are to be listed. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. The maximum number of results to return per page. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The origin of the AI Agent versions to be listed. The summaries of AI Agent versions. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. 
The maximum number of results to return per page. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The origin of the AI Agents to be listed. The summaries of AI Agents. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The identifier of the Amazon Q in Connect AI Agent for which versions are to be listed. The identifier of the Amazon Q in Connect AI Guardrail for which versions are to be listed. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The origin of the AI Agent versions to be listed. The summaries of AI Agent versions. The summaries of the AI Guardrail versions. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The origin of the AI Agents to be listed. The summaries of AI Agents. The summaries of the AI Guardrails. The identifier of the Amazon Q in Connect assistant. The maximum number of results to return per page. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The identifier of the Amazon Q in Connect session. The message information. The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The AI Guardrail identifier for the Answer Generation guardrail used by the MANUAL_SEARCH AI Agent. The AI Prompt identifier for the Answer Generation prompt used by the MANUAL_SEARCH AI Agent. The message data in text type. The message data. The message input value. The message input. The identifier of a message. The participant of a message. The timestamp of a message. The value of a message data. The message output. The association configurations for overriding behavior on this AI Agent. The AI Guardrail identifier used by the SELF_SERVICE AI Agent. The AI Prompt identifier for the Self Service Answer Generation prompt used by the SELF_SERVICE AI Agent The AI Prompt identifier for the Self Service Pre-Processing prompt used by the SELF_SERVICE AI Agent The configuration for AI Agents of type SELF_SERVICE. The bot response of the conversation history data. The input transcript of the conversation history data. The number of turn of the conversation history data. The conversation history data to included in conversation context data before the the Amazon Q in Connect session.. The identifier of the Amazon Q in Connect assistant. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field.For more information about idempotency, see Making retries safe with idempotent APIs. The conversation context before the Amazon Q in Connect session. The message data to submit to the Amazon Q in Connect session. The identifier of the Amazon Q in Connect session. The message type. The token for the next message, used by GetNextMessage. The identifier of the submitted message. The configuration for a prompt template that supports full textual prompt configuration using a YAML prompt. The value of the message data in text type. The message data in text type. The identifier of the Amazon Q in Connect AI Guardrail. The identifier of the Amazon Q in Connect assistant. 
Can be either the ID or the ARN. URLs cannot contain the ARN. The message to return when the AI Guardrail blocks a prompt. The message to return when the AI Guardrail blocks a model response. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.. The content filter policies to configure for the AI Guardrail. The contextual grounding policy configuration used to create an AI Guardrail. A description of the AI Guardrail. The sensitive information policy to configure for the AI Guardrail. The topic policies to configure for the AI Guardrail. The visibility status of the Amazon Q in Connect AI Guardrail. The word policy you configure for the AI Guardrail. The data of the updated Amazon Q in Connect AI Guardrail. Creates an Amazon Q Business application. There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users. A Amazon Q Apps service linked role will be created if it's absent in the Amazon Web Services account when the QAppsConfiguration is enabled in the request. For more information, see Using service-linked roles for Q Apps Creates an Amazon Q Business application. There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users. An Amazon Q Apps service linked role will be created if it's absent in the Amazon Web Services account when When you create an application, Amazon Q Business may securely transmit data for processing from your selected Amazon Web Services region, but within your geography. For more information, see Cross region inference in Amazon Q Business. Gets information about an existing Amazon Q Business index. Returns the image bytes corresponding to a media object. If you have implemented your own application with the Chat and ChatSync APIs, and have enabled content extraction from visual data in Amazon Q Business, you use the GetMedia API operation to download the images so you can show them in your UI with responses. For more information, see Extracting semantic meaning from images and visuals. Lists Amazon Q Business applications. Lists Amazon Q Business applications. Amazon Q Business applications may securely transmit data for processing across Amazon Web Services Regions within your geography. For more information, see Cross region inference in Amazon Q Business. Gets a list of attachments associated with an Amazon Q Business web experience or a list of attachements associated with a specific Amazon Q Business conversation. Updates an existing Amazon Q Business application. A Amazon Q Apps service-linked role will be created if it's absent in the Amazon Web Services account when the QAppsConfiguration is enabled in the request. For more information, see Using service-linked roles for Q Apps Updates an existing Amazon Q Business application. 
Amazon Q Business applications may securely transmit data for processing across Amazon Web Services Regions within your geography. For more information, see Cross region inference in Amazon Q Business. An Amazon Q Apps service-linked role will be created if it's absent in the Amazon Web Services account when The creator mode specific admin controls configured for an Amazon Q Business application. Determines whether an end user can generate LLM-only responses when they use the web experience. For more information, see Admin controls and guardrails and Conversation settings. The identifier of the Amazon Q Business attachment. The identifier of the Amazon Q Business conversation the attachment is associated with. The name of the file. Filename of the Amazon Q Business attachment. A CopyFromSource containing a reference to the original source of the Amazon Q Business attachment. Filetype of the Amazon Q Business attachment. Size in bytes of the Amazon Q Business attachment. MD5 checksum of the Amazon Q Business attachment contents. The Unix timestamp when the Amazon Q Business attachment was created. AttachmentStatus of the Amazon Q Business attachment. ErrorDetail providing information about a Amazon Q Business attachment error. An attachment in an Amazon Q Business conversation. The data contained within the uploaded file. The contents of the attachment. The filename of the attachment. A reference to an existing attachment. A file directly uploaded into a web experience chat. This is either a file directly uploaded into a web experience chat or a reference to an existing attachment that is part of a web experience chat. A file input event activated by a end user request to upload files into their web experience chat. An error associated with a file uploaded during chat. The unique identifier of the Amazon Q Business attachment. The unique identifier of the Amazon Q Business conversation. The details of a file uploaded during chat. Specify the browser extensions allowed for your Amazon Q web experience. The container for browser extension configuration for an Amazon Q Business web experience. The chat modes available to an Amazon Q Business end user. For more information, see Admin controls and guardrails, Plugins, and Conversation settings. The If none of the modes are selected, Amazon Q will only respond using the information from the attached files. For more information, see Admin controls and guardrails, Plugins, and Response sources. The type of the resource affected. You are trying to perform an action that conflicts with the current status of your resource. Fix any inconsistences with your resources and try again. You are trying to perform an action that conflicts with the current status of your resource. Fix any inconsistencies with your resources and try again. The unique identifier of the Amazon Q Business conversation. The unique identifier of the Amazon Q Business attachment. The source reference for an existing attachment in an existing conversation. A reference to an attachment in an existing conversation. The source reference for an existing attachment. The authentication type being used by a Amazon Q Business application. The Amazon Resource Name (ARN) of an identity provider being used by an Amazon Q Business application. A token you provide to identify a request to create a data source connector. Multiple calls to the The configuration for extracting information from media in documents during ingestion. A name for the Amazon Q Business index. 
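Among the attachment fields described above is an MD5 checksum of the Amazon Q Business attachment contents. A minimal plain-JDK sketch of computing such a hex digest for a local file follows; the file name is a hypothetical placeholder and no SDK call is involved.

    // Sketch only: computing the kind of MD5 checksum carried by the attachment
    // metadata described above. Plain JDK code, Java 8 compatible.
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.security.MessageDigest;

    public final class AttachmentMd5 {
        public static String md5Hex(Path file) throws Exception {
            byte[] digest = MessageDigest.getInstance("MD5").digest(Files.readAllBytes(file));
            StringBuilder hex = new StringBuilder();
            for (byte b : digest) {
                hex.append(String.format("%02x", b));   // two lowercase hex characters per byte
            }
            return hex.toString();
        }

        public static void main(String[] args) throws Exception {
            // "report.pdf" is a hypothetical local file standing in for an attachment
            System.out.println(md5Hex(Paths.get("report.pdf")));
        }
    }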
The index type that's suitable for your needs. For more information on what's included in each type of index, see Amazon Q Business tiers. A description for the Amazon Q Business index. The index type that's suitable for your needs. For more information on what's included in each type of index, see Amazon Q Business tiers. A list of key-value pairs that identify or categorize the index. You can also use tags to help control access to the index. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @. Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience. The browser extension configuration for an Amazon Q Business web experience. For Amazon Q Business application using external OIDC-compliant identity providers (IdPs). The IdP administrator must add the browser extension sign-in redirect URLs to the IdP application. For more information, see Configure external OIDC identity provider for your browser extensions.. The configuration information for altering document metadata and content during the document ingestion process. The configuration for extracting information from media in the document. A document in an Amazon Q Business application. The message explaining the data source sync error. The message explaining the Amazon Q Business request error. The code associated with the data source sync error. The code associated with the Amazon Q Business request error. Provides information about a data source sync error. Provides information about a Amazon Q Business request error. An external resource that you configured with your application is returning errors and preventing this operation from succeeding. Fix those errors and try again. The authentication type being used by a Amazon Q Business application. The Amazon Resource Name (ARN) of an identity provider being used by an Amazon Q Business application. When the The configuration for extracting information from media in documents for the data source. The name of the Amazon Q Business index. The type of index attached to your Amazon Q Business application. The Amazon Resource Name (ARN) of the Amazon Q Business index. The current status of the index. When the value is The type of index attached to your Amazon Q Business application. The description for the Amazon Q Business index. The identifier of the Amazon Q Business which contains the media object. The identifier of the Amazon Q Business conversation. The identifier of the Amazon Q Business message. The identifier of the media object. You can find this in the The base64-encoded bytes of the media object. The MIME type of the media object (image/png). When the The browser extension configuration for an Amazon Q Business web experience. Provides the configuration information for invoking a Lambda function in Lambda to alter document metadata and content when ingesting documents into Amazon Q Business. You can configure your Lambda function using the If you want to apply advanced alterations on the Amazon Q Business structured documents, you must configure your Lambda function using You can only invoke one Lambda function. However, this function can invoke other functions it requires. For more information, see Custom document enrichment. Specify whether to extract semantic meaning from images and visuals from documents. The configuration for extracting semantic meaning from images in documents. 
For more information, see Extracting semantic meaning from images and visuals. The unique identifier for the Amazon Q Business application. The unique identifier of the Amazon Q Business web experience conversation. The unique identifier of the user involved in the Amazon Q Business web experience conversation. If the number of attachments returned exceeds The maximum number of attachements to return. An array of information on one or more attachments. If the response is truncated, Amazon Q Business returns this token, which you can use in a later request to list the next set of attachments. If the number of retrievers returned exceeds If the number of messages returned exceeds The configuration for extracting semantic meaning from images in documents. For more information, see Extracting semantic meaning from images and visuals. The configuration for extracting information from media in documents. The requested media object is too large to be returned. The documents used to generate an Amazon Q Business web experience response. The relevant text excerpt from a source that was used to generate a citation text segment in an Amazon Q Business chat response. The identifier of the media object associated with the text segment in the source attribution. The MIME type (image/png) of the media object associated with the text segment in the source attribution. Provides information about a text extract in a chat response that can be attributed to a source document. The Amazon Resource Name (ARN) of an IAM role with permission to access the data source and required resources. The configuration for extracting information from media in documents for your data source. Updates the website domain origins that are allowed to embed the Amazon Q Business web experience. The browser extension configuration for an Amazon Q Business web experience. For Amazon Q Business application using external OIDC-compliant identity providers (IdPs). The IdP administrator must add the browser extension sign-in redirect URLs to the IdP application. For more information, see Configure external OIDC identity provider for your browser extensions.. Returns a list of the access grants that were given to the caller using S3 Access Grants and that allow the caller to access the S3 data of the Amazon Web Services account specified in the request. You must have the Use this API to list the access grants that grant the caller access to Amazon S3 data through S3 Access Grants. The caller (grantee) can be an Identity and Access Management (IAM) identity or Amazon Web Services Identity Center corporate directory identity. You must pass the Amazon Web Services account of the S3 data owner (grantor) in the request. You can, optionally, narrow the results by You must have the The action that you want this job to perform on every object listed in the manifest. For more information about the available actions, see Operations in the Amazon S3 User Guide. The action that you want this job to perform on every object listed in the manifest. For more information about the available actions, see Operations in the Amazon S3 User Guide. Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation. General purpose buckets - For example, to copy objects to a general purpose bucket named Directory buckets - For example, to copy objects to a directory bucket named Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation. 
General purpose buckets - For example, to copy objects to a general purpose bucket named Directory buckets - For example, to copy objects to a directory bucket named Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, you get an HTTP This functionality is not supported by directory buckets. Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. Directory buckets - If you specify Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Amazon Web Services KMS (SSE-KMS). Setting this header to Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not supported by directory buckets. Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Amazon Web Services KMS (SSE-KMS). Setting this header to Specifying this header with an Copy action doesn’t affect bucket-level settings for S3 Bucket Key. Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through the Copy operation in Batch Operations. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) ( The server-side encryption algorithm used when storing objects in Amazon S3. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( The workgroup of the Athena table reference. The output location for the Athena table. The database name. The table reference. A reference to a table within Athena. The type of the column. A column within a schema relation, derived from the underlying Glue table. A column within a schema relation, derived from the underlying table. The Glue table that this configured table represents. The table that this configured table represents. A reference to the Glue table being configured. A reference to the table being configured. Information about the schema type properties. The secret ARN of the Snowflake table reference. The account identifier for the Snowflake table reference. The name of the database the Snowflake table belongs to. The name of the Snowflake table. The schema name of the Snowflake table reference. The schema of the Snowflake table. A reference to a table within Snowflake. The schema of a Snowflake table. The schema of a Snowflake table. The column name. The column's data type. Supported data types: The Snowflake table schema. If present, a reference to the Glue table referred to by this table reference. If present, a reference to the Snowflake table referred to by this table reference. If present, a reference to the Athena table referred to by this table reference. A pointer to the dataset that underlies this table. Currently, this can only be an Glue table. A pointer to the dataset that underlies this table. 
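Returning to the SSE-KMS and S3 Bucket Key settings described earlier in this block for the Batch Operations copy job: the same flags exist on a plain PutObject call in the AWS SDK for Java 2.x, which makes for a compact illustration. The bucket name, object key, and KMS key ARN below are placeholders; Batch Operations itself is configured through its own job request shapes rather than this call.

    // Hedged illustration of SSE-KMS with an S3 Bucket Key, shown on a regular
    // PutObject request. Bucket, key, and KMS key ARN are placeholders.
    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutObjectRequest;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

    public final class SseKmsBucketKeyExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                PutObjectRequest request = PutObjectRequest.builder()
                        .bucket("amzn-s3-demo-bucket")                        // placeholder bucket
                        .key("reports/example.txt")                           // placeholder key
                        .serverSideEncryption(ServerSideEncryption.AWS_KMS)   // SSE-KMS
                        .ssekmsKeyId("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE") // use the full Key ARN for cross-account keys
                        .bucketKeyEnabled(true)                               // S3 Bucket Key reduces calls to KMS
                        .build();
                s3.putObject(request, RequestBody.fromString("hello"));
            }
        }
    }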
Creates a data source connector for a knowledge base. You can't change the Connects a knowledge base to a data source. You specify the configuration for the specific data source service in the You can't change the Deletes a knowledge base. Before deleting a knowledge base, you should disassociate the knowledge base from any agents that it is associated with by making a DisassociateAgentKnowledgeBase request. Deletes documents from a data source and syncs the changes to the knowledge base that is connected to it. For more information, see Ingest documents into a knowledge base in real-time in the Amazon Bedrock User Guide. Gets information about a knowledge base. Retrieves specific documents from a data source that is connected to a knowledge base. For more information, see Ingest documents into a knowledge base in real-time in the Amazon Bedrock User Guide. Retrieves information about the working draft ( Ingests documents directly into the knowledge base that is connected to the data source. The Lists the data ingestion jobs for a data source. The list also includes information about each job. Retrieves all the documents contained in a data source that is connected to a knowledge base. For more information, see Ingest documents into a knowledge base in real-time in the Amazon Bedrock User Guide. The base64-encoded string of the content. The MIME type of the content. For a list of MIME types, see Media Types. The following MIME types are supported: text/plain, text/html, text/csv, text/vtt, message/rfc822, application/xhtml+xml, application/pdf, application/msword, application/vnd.ms-word.document.macroenabled.12, application/vnd.ms-word.template.macroenabled.12, application/vnd.ms-excel, application/vnd.ms-excel.addin.macroenabled.12, application/vnd.ms-excel.sheet.macroenabled.12, application/vnd.ms-excel.template.macroenabled.12, application/vnd.ms-excel.sheet.binary.macroenabled.12, application/vnd.ms-spreadsheetml, application/vnd.openxmlformats-officedocument.spreadsheetml.sheet, application/vnd.openxmlformats-officedocument.spreadsheetml.template, application/vnd.openxmlformats-officedocument.wordprocessingml.document, and application/vnd.openxmlformats-officedocument.wordprocessingml.template. Contains information about content defined inline in bytes. A unique identifier for the document. Contains information about content defined inline to ingest into a knowledge base. Contains information about the Amazon S3 location of the file from which to ingest data. The source of the data to ingest. Contains information about the content to ingest into a knowledge base connected to a custom data source. Choose a The identifier of the document to ingest into a custom data source. Contains information about the identifier of the document to ingest into a custom data source. Details of custom orchestration. The identifier of the Amazon Web Services account that owns the S3 bucket containing the content to ingest. The S3 URI of the file containing the content to ingest. Contains information about the Amazon S3 location of the file containing the content to ingest into a knowledge base connected to a custom data source. Details about a cyclic connection detected in the flow. A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. The unique identifier of the data source that contains the documents. 
A list of objects, each of which contains information to identify a document to delete. The unique identifier of the knowledge base that is connected to the data source. A list of objects, each of which contains information about the documents that were deleted. Contains information about the content to ingest into a knowledge base connected to a custom data source. The type of data source that is connected to the knowledge base to which to ingest this document. Contains information about the content to ingest into a knowledge base connected to an Amazon S3 data source. Contains information about the content of a document. Choose a Contains information that identifies the document in a custom data source. The type of data source connected to the knowledge base that contains the document. Contains information that identifies the document in an S3 data source. Contains information that identifies the document. An array of objects, each of which defines a metadata attribute to associate with the content to ingest. You define the attributes inline. The Amazon S3 location of the file containing metadata to associate with the content to ingest. The type of the source from which to add metadata. Contains information about the metadata associated with the content to ingest into a knowledge base. Choose a Bedrock model's embedding data type. It can be either float32 or binary. The unique identifier of the data source that contains the documents. A list of objects, each of which contains information to identify a document for which to retrieve information. The unique identifier of the knowledge base that is connected to the data source. A list of objects, each of which contains information about the documents that were retrieved. Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. The unique identifier of the data source connected to the knowledge base that you're adding documents to. A list of objects, each of which contains information about the documents to add. The unique identifier of the knowledge base to ingest the documents into. A list of objects, each of which contains information about the documents that were ingested. Contains details about a data ingestion job. Contains information about content defined inline in bytes. Contains information about content defined inline in text. The type of inline content to define. Contains information about content defined inline to ingest into a data source. Choose a Contains configurations for an iterator node in a flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output. The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node. Contains details about the vector embeddings configuration of the knowledge base. Contains the content of the document. Contains the metadata to associate with the document. Contains information about a document to ingest into a knowledge base and metadata to associate with it. 
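The inline content described above is supplied as a base64-encoded string plus a MIME type from the supported list. As a rough sketch of preparing that pair before building an ingestion request, the plain-JDK snippet below reads a local file, encodes it, and guesses a MIME type; the file name and the fallback type are placeholders, and wiring the result into the actual request shape is intentionally left out.

    // Sketch: preparing inline document content (base64 string + MIME type) of the
    // kind described above. Plain JDK code; no service call is made here.
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Base64;

    public final class InlineContentPrep {
        public static void main(String[] args) throws Exception {
            Path doc = Paths.get("faq.pdf");                        // hypothetical local file
            String base64Body = Base64.getEncoder().encodeToString(Files.readAllBytes(doc));
            String mimeType = Files.probeContentType(doc);          // may return null on some platforms
            if (mimeType == null) {
                mimeType = "application/pdf";                       // fall back to one of the supported types
            }
            System.out.println(mimeType + ", " + base64Body.length() + " base64 characters");
        }
    }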
The identifier of the data source connected to the knowledge base that the document was ingested into or deleted from. Contains information that identifies the document. The identifier of the knowledge base that the document was ingested into or deleted from. The ingestion status of the document. The following statuses are possible: STARTED – You submitted the ingestion job containing the document. PENDING – The document is waiting to be ingested. IN_PROGRESS – The document is being ingested. INDEXED – The document was successfully indexed. PARTIALLY_INDEXED – The document was partially indexed. METADATA_PARTIALLY_INDEXED – You submitted metadata for an existing document and it was partially indexed. METADATA_UPDATE_FAILED – You submitted a metadata update for an existing document but it failed. FAILED – The document failed to be ingested. NOT_FOUND – The document wasn't found. IGNORED – The document was ignored during ingestion. DELETING – You submitted the delete job containing the document. DELETE_IN_PROGRESS – The document is being deleted. The reason for the status. Appears alongside the status The date and time at which the document was last updated. Contains the details for a document that was ingested or deleted. The unique identifier of the data source that contains the documents. The unique identifier of the knowledge base that is connected to the data source. The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the If the total number of results is greater than the A list of objects, each of which contains information about the documents that were retrieved. If the total number of results is greater than the The key of the metadata attribute. Contains the value of the metadata attribute. Contains information about a metadata attribute. The value of the Boolean metadata attribute. The value of the numeric metadata attribute. An array of strings that define the value of the metadata attribute. The value of the string metadata attribute. The type of the metadata attribute. Contains the value of the metadata attribute. Choose a The S3 location of the file containing the content to ingest. Contains information about the content to ingest into a knowledge base connected to an Amazon S3 data source. The text of the content. Contains information about content defined inline in text. Deletes the emergency calling details from the specified Amazon Chime SDK Voice Connector. Deletes the external systems configuration for a Voice Connector. Retrieves the emergency calling configuration details for the specified Voice Connector. Gets information about an external systems configuration for a Voice Connector. Updates a Voice Connector's emergency calling configuration. Adds an external systems configuration to a Voice Connector. The tags assigned to the Voice Connector. The connectors for use with Amazon Connect. The following options are available: The ID of the Voice Connector for which to delete the external system configuration. The session border controllers. The contact center system. Contains information about an external systems configuration for a Voice Connector. The ID of the Voice Connector for which to return information about the external system configuration. An object that contains information about an external systems configuration for a Voice Connector. The ID of the Voice Connector for which to add the external system configuration. The session border controllers to use. 
The contact center system to use. An object that contains information about an external systems configuration for a Voice Connector. The ARN of the Voice Connector. The connectors for use with Amazon Connect. The Amazon Chime SDK Voice Connector configuration, including outbound host name and encryption settings. Creates a listener for a service. Before you start using your Amazon VPC Lattice service, you must add one or more listeners. A listener is a process that checks for connection requests to your services. For more information, see Listeners in the Amazon VPC Lattice User Guide. Creates a resource configuration. A resource configuration defines a specific resource. You can associate a resource configuration with a service network or a VPC endpoint. Creates a resource gateway. Creates a service network. A service network is a logical boundary for a collection of services. You can associate services and VPCs with a service network. For more information, see Service networks in the Amazon VPC Lattice User Guide. Associates the specified service network with the specified resource configuration. This allows the resource configuration to receive connections through the service network, including through a service network VPC endpoint. Associates a service with a service network. For more information, see Manage service associations in the Amazon VPC Lattice User Guide. You can't use this operation if the service and service network are already associated or if there is a disassociation or deletion in progress. If the association fails, you can retry the operation by deleting the association and recreating it. You cannot associate a service and service network that are shared with a caller. The caller must own either the service or the service network. As a result of this operation, the association is created in the service network account and the association owner account. Associates the specified service with the specified service network. For more information, see Manage service associations in the Amazon VPC Lattice User Guide. You can't use this operation if the service and service network are already associated or if there is a disassociation or deletion in progress. If the association fails, you can retry the operation by deleting the association and recreating it. You cannot associate a service and service network that are shared with a caller. The caller must own either the service or the service network. As a result of this operation, the association is created in the service network account and the association owner account. Deletes the specified listener. Deletes the specified resource configuration. Disassociates the resource configuration from the resource VPC endpoint. Deletes the specified resource gateway. Deletes a service network. You can only delete the service network if there is no service or VPC associated with it. If you delete a service network, all resources related to the service network, such as the resource policy, auth policy, and access log subscriptions, are also deleted. For more information, see Delete a service network in the Amazon VPC Lattice User Guide. Deletes the association between a service network and a resource configuration. Deletes the association between a specified service and the specific service network. This operation fails if an association is still in progress. Deletes the association between a service and a service network. This operation fails if an association is still in progress. 
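A minimal sketch of the service-to-service-network association workflow described above follows. It assumes the VPC Lattice client and request names follow the usual AWS SDK for Java 2.x conventions (VpcLatticeClient, CreateServiceNetworkServiceAssociationRequest); those names, the accessor on the response, and the identifiers are assumptions to verify against the generated model.

    // Hedged sketch: associate a service with a service network, as described above.
    // Builder and member names are assumptions; identifiers are placeholders.
    import software.amazon.awssdk.services.vpclattice.VpcLatticeClient;
    import software.amazon.awssdk.services.vpclattice.model.CreateServiceNetworkServiceAssociationRequest;
    import software.amazon.awssdk.services.vpclattice.model.CreateServiceNetworkServiceAssociationResponse;

    public final class AssociateServiceExample {
        public static void main(String[] args) {
            try (VpcLatticeClient lattice = VpcLatticeClient.create()) {
                CreateServiceNetworkServiceAssociationResponse response = lattice.createServiceNetworkServiceAssociation(
                        CreateServiceNetworkServiceAssociationRequest.builder()
                                .serviceNetworkIdentifier("sn-0123456789abcdef0")   // ID or ARN of the service network
                                .serviceIdentifier("svc-0123456789abcdef0")         // ID or ARN of the service
                                .build());
                // Per the guidance above, a failed association is retried by deleting it and recreating it.
                System.out.println("Association status: " + response.statusAsString());
            }
        }
    }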
Retrieves information about the specified listener for the specified service. Retrieves information about the specified resource configuration. Retrieves information about the specified resource gateway. Retrieves information about the resource policy. The resource policy is an IAM policy created on behalf of the resource owner when they share a resource. Retrieves information about the specified resource policy. The resource policy is an IAM policy created on behalf of the resource owner when they share a resource. Retrieves information about listener rules. You can also retrieve information about the default listener rule. For more information, see Listener rules in the Amazon VPC Lattice User Guide. Retrieves information about the specified listener rules. You can also retrieve information about the default listener rule. For more information, see Listener rules in the Amazon VPC Lattice User Guide. Retrieves information about the specified service network. Retrieves information about the specified association between a service network and a resource configuration. Retrieves information about the association between a service network and a VPC. Retrieves information about the specified association between a service network and a VPC. Lists all access log subscriptions for the specified service network or service. Lists the access log subscriptions for the specified service network or service. Lists the listeners for the specified service. Lists the resource configurations owned by or shared with this account. Lists the associations for the specified VPC endpoint. Lists the resource gateways that you own or that were shared with you. Lists the rules for the listener. Lists the rules for the specified listener. Lists the associations between a service network and a resource configuration. Lists the associations between the service network and the service. You can filter the list either by service or service network. You must provide either the service network identifier or the service identifier. Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a resource that is shared with another account, the association includes the local account ID as the prefix in the ARN for each account the resource is shared with. Lists the associations between a service network and a service. You can filter the list either by service or service network. You must provide either the service network identifier or the service identifier. Every association in Amazon VPC Lattice has a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a resource is shared with another account, the association includes the local account ID as the prefix in the ARN. Lists the service network and VPC associations. You can filter the list either by VPC or service network. You must provide either the service network identifier or the VPC identifier. Lists the associations between a service network and a VPC. You can filter the list either by VPC or service network. You must provide either the ID of the service network identifier or the ID of the VPC. Lists the associations between a service network and a VPC endpoint. Lists the service networks owned by the caller account or shared with the caller account. 
Also includes the account ID in the ARN to show which account owns the service network. Lists the service networks owned by or shared with this account. The account ID in the ARN shows which account owns the service network. Updates the specified listener for the specified service. Updates the specified resource configuration. Updates the specified resource gateway. Updates a rule for the listener. You can't modify a default listener rule. To modify a default listener rule, use Updates a specified rule for the listener. You can't modify a default listener rule. To modify a default listener rule, use Updates the specified service. Updates the service network and VPC association. If you add a security group to the service network and VPC association, the association must continue to always have at least one security group. You can add or edit security groups at any time. However, to remove all security groups, you must first delete the association and recreate it without security groups. Updates the service network and VPC association. If you add a security group to the service network and VPC association, the association must continue to have at least one security group. You can add or edit security groups at any time. However, to remove all security groups, you must first delete the association and then recreate it without security groups. Updates the specified target group. The date and time that the access log subscription was created, specified in ISO-8601 format. The date and time that the access log subscription was created, in ISO-8601 format. The date and time that the access log subscription was last updated, specified in ISO-8601 format. The date and time that the access log subscription was last updated, in ISO-8601 format. The ID of the service or service network. Log type of the service network. Summary information about an access log subscription. The Amazon Resource Name (ARN) of the resource. The Amazon Resource Name (ARN) of the resource. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or Amazon Resource Name (ARN) of the service network or service. The ID or ARN of the service network or service. The type of log that monitors your Amazon VPC Lattice service networks. The ID of the service network or service. The type of log that monitors your Amazon VPC Lattice service networks. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The action for the default rule. (SINGLE, GROUP, ARN) Specifies whether the resource configuration can be associated with a sharable service network. The default is false. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. The ID or Amazon Resource Name (ARN) of the listener. The name of the resource configuration. The name must be unique within the account. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. The rule match. (SINGLE, GROUP, CHILD) The TCP port ranges that a consumer can use to access a resource configuration (for example: 1-65535). You can separate port ranges using commas (for example: 1,2,22-30). The name of the rule. 
The name must be unique within the listener. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. (SINGLE, GROUP) The protocol accepted by the resource configuration. The priority assigned to the rule. Each rule for a specific listener must have a unique priority. The lower the priority number the higher the priority. (SINGLE, CHILD, ARN) The resource configuration. (CHILD) The ID or ARN of the parent resource configuration (type is (SINGLE, GROUP, ARN) The ID or ARN of the resource gateway used to connect to the resource configuration. For a child resource configuration, this value is inherited from the parent resource configuration. The tags for the resource configuration. The type of resource configuration. Specifies whether the resource configuration can be associated with a sharable service network. The Amazon Resource Name (ARN) of the resource configuration. The date and time that the resource configuration was created, in ISO-8601 format. The reason that the request failed. The ID of the resource configuration. The name of the resource configuration. The port range. The protocol. The resource configuration. The ID of the parent resource configuration (type is GROUP). The ID of the resource gateway associated with the resource configuration. The current status of the resource configuration. The type of resource configuration. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. The type of IP address used by the resource gateway. The name of the resource gateway. The IDs of the security groups to apply to the resource gateway. The security groups must be in the same VPC. The IDs of the VPC subnets in which to create the resource gateway. The tags for the resource gateway. The ID of the VPC for the resource gateway. The Amazon Resource Name (ARN) of the resource gateway. The ID of the resource gateway. The type of IP address for the resource gateway. The name of the resource gateway. The IDs of the security groups for the resource gateway. The status of the resource gateway. The IDs of the resource gateway subnets. The ID of the VPC. The action for the default rule. A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. The ID or ARN of the listener. The rule match. The name of the rule. The name must be unique within the listener. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. The priority assigned to the rule. Each rule for a specific listener must have a unique priority. The lower the priority number the higher the priority. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The name of the service network. The name must be unique to the account. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. Specify if the service network should be enabled for sharing. The tags for the service network. 
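The naming rule repeated above (lowercase letters, digits, and hyphens, with no hyphen at the start or end and none immediately after another hyphen) can be checked client-side with a short regular expression. The sketch below is an illustration of that stated constraint only; it is not SDK validation code and ignores any length limits the service may also enforce.

    // Sketch: client-side check of the naming rule quoted above for resource
    // configurations, rules, and service networks.
    import java.util.regex.Pattern;

    public final class LatticeNameCheck {
        // one or more leading alphanumerics, then hyphen-separated alphanumeric groups
        private static final Pattern VALID_NAME = Pattern.compile("^[a-z0-9]+(-[a-z0-9]+)*$");

        public static boolean isValid(String name) {
            return VALID_NAME.matcher(name).matches();
        }

        public static void main(String[] args) {
            System.out.println(isValid("payments-rc-1"));  // true
            System.out.println(isValid("-payments"));      // false: leading hyphen
            System.out.println(isValid("payments--rc"));   // false: consecutive hyphens
        }
    }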
A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. The ID of the resource configuration to associate with the service network. The ID of the service network to associate with the resource configuration. The tags for the association. The Amazon Resource Name (ARN) of the association. The ID of the account that created the association. The ID of the association. The status of the association. The name of the service network. Specifies if the service network is enabled for sharing. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts. The ID or ARN of the service network. You must use an ARN if the resources are in different accounts. The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN when the resources specified in the operation are in different accounts. The ID or ARN of the service network. You must use an ARN if the resources are in different accounts. The ID or Amazon Resource Name (ARN) of the access log subscription. The ID or ARN of the access log subscription. The ID or Amazon Resource Name (ARN) of the resource. The ID or ARN of the resource. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or ARN of the resource configuration. The ID or ARN of the association. The Amazon Resource Name (ARN) of the association. The ID of the association. The Amazon Resource Name (ARN) of the resource configuration associated with the VPC endpoint of type resource. The ID of the resource configuration. The ID of the resource VPC endpoint that is associated with the resource configuration. The ID or ARN of the resource gateway. The Amazon Resource Name (ARN) of the resource gateway. The ID of the resource gateway. The name of the resource gateway. The status of the resource gateway. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the rule. The ID or ARN of the rule. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The Amazon Resource Name (ARN) or ID of the service network. The ID or ARN of the service network. The ID of the association. The Amazon Resource Name (ARN) of the association. The ID of the association. The status of the association. The ID or Amazon Resource Name (ARN) of the association. The ID or ARN of the association. The ID or Amazon Resource Name (ARN) of the association. The ID or ARN of the association. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. Describes the DNS information of a service. The domain name of the resource. The type of IP address. The DNS name of the resource. The ID or Amazon Resource Name (ARN) of the access log subscription. The ID or ARN of the access log subscription. The date and time that the access log subscription was created, specified in ISO-8601 format. 
The date and time that the access log subscription was created, in ISO-8601 format. The date and time that the access log subscription was last updated, specified in ISO-8601 format. The date and time that the access log subscription was last updated, in ISO-8601 format. The ID of the service network or service. The log type for the service network. The ID or Amazon Resource Name (ARN) of the service network or service. The ID or ARN of the service network or service. The date and time that the auth policy was created, specified in ISO-8601 format. The date and time that the auth policy was created, in ISO-8601 format. The date and time that the auth policy was last updated, specified in ISO-8601 format. The date and time that the auth policy was last updated, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The date and time that the listener was created, specified in ISO-8601 format. The date and time that the listener was created, in ISO-8601 format. The date and time that the listener was last updated, specified in ISO-8601 format. The date and time that the listener was last updated, in ISO-8601 format. The Amazon Resource Name (ARN) of the service network or service. The ID of the resource configuration. An IAM policy. Specifies whether the resource configuration is associated with a sharable service network. Indicates whether the resource configuration was created and is managed by Amazon. The Amazon Resource Name (ARN) of the resource configuration. The date and time that the resource configuration was created, in ISO-8601 format. The custom domain name of the resource configuration. The reason the create-resource-configuration request failed. The ID of the resource configuration. The most recent date and time that the resource configuration was updated, in ISO-8601 format. The name of the resource configuration. The TCP port ranges that a consumer can use to access a resource configuration. You can separate port ranges with a comma. Example: 1-65535 or 1,2,22-30 The TCP protocol accepted by the specified resource configuration. The resource configuration. The ID of the group resource configuration. The ID of the resource gateway used to connect to the resource configuration in a given VPC. You can specify the resource gateway identifier only for resource configurations with type SINGLE, GROUP, or ARN. The status of the resource configuration. The type of resource configuration. The ID of the resource gateway. The Amazon Resource Name (ARN) of the resource gateway. The date and time that the resource gateway was created, in ISO-8601 format. The ID of the resource gateway. The type of IP address for the resource gateway. The date and time that the resource gateway was last updated, in ISO-8601 format. The name of the resource gateway. The security group IDs associated with the resource gateway. The status for the resource gateway. The IDs of the VPC subnets for resource gateway. The ID of the VPC for the resource gateway. The Amazon Resource Name (ARN) of the service network or service. An IAM policy. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the listener rule. The ID or ARN of the listener rule. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The date and time that the listener rule was created, specified in ISO-8601 format. 
The date and time that the listener rule was created, in ISO-8601 format. The date and time that the listener rule was last updated, specified in ISO-8601 format. The date and time that the listener rule was last updated, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the service network. The ID or ARN of the service network. The ID of the association. The Amazon Resource Name (ARN) of the association. The date and time that the association was created, in ISO-8601 format. The account that created the association. The DNS entry for the service. The failure code. The reason the association request failed. The ID of the association. Indicates whether the association is managed by Amazon. The most recent date and time that the association was updated, in ISO-8601 format. The private DNS entry for the service. The Amazon Resource Name (ARN) of the association. The ID of the resource configuration that is associated with the service network. The name of the resource configuration that is associated with the service network. The Amazon Resource Name (ARN) of the service network that is associated with the resource configuration. The ID of the service network that is associated with the resource configuration. The name of the service network that is associated with the resource configuration. The status of the association. The date and time that the service network was created, specified in ISO-8601 format. The date and time that the service network was created, in ISO-8601 format. The date and time of the last update, specified in ISO-8601 format. The date and time of the last update, in ISO-8601 format. The number of VPCs associated with the service network. Specifies if the service network is enabled for sharing. The ID or Amazon Resource Name (ARN) of the association. The ID or ARN of the association. The date and time that the association was created, specified in ISO-8601 format. The date and time that the association was created, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the association. The ID or ARN of the association. The date and time that the association was created, specified in ISO-8601 format. The date and time that the association was created, in ISO-8601 format. The ID of the specified association between the service network and the VPC. The ID of the association. The date and time that the association was last updated, specified in ISO-8601 format. The date and time that the association was last updated, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The date and time that the service was created, specified in ISO-8601 format. The date and time that the service was created, in ISO-8601 format. The date and time that the service was last updated, specified in ISO-8601 format. The date and time that the service was last updated, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. The date and time that the target group was created, specified in ISO-8601 format. The date and time that the target group was created, in ISO-8601 format. The date and time that the target group was last updated, specified in ISO-8601 format. The date and time that the target group was last updated, in ISO-8601 format. The IP address of the IP resource. Describes an IP resource. The ID or Amazon Resource Name (ARN) of the service network or service. The ID or ARN of the service network or service. The ID or Amazon Resource Name (ARN) of the service. 
The ID or ARN of the service. The maximum page size. A pagination token for the next page of results. The ID of the group resource configuration. The ID of the resource gateway for the resource configuration. Information about the resource configurations. If there are additional results, a pagination token for the next page of results. The maximum page size. A pagination token for the next page of results. The ID for the resource configuration associated with the VPC endpoint. The ID of the association. The ID of the VPC endpoint in the association. The owner of the VPC endpoint in the association. Information about the VPC endpoint associations. If there are additional results, a pagination token for the next page of results. The maximum page size. If there are additional results, a pagination token for the next page of results. Information about the resource gateways. If there are additional results, a pagination token for the next page of results. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The maximum page size. If there are additional results, a pagination token for the next page of results. The ID of the resource configuration. The ID of the service network. Information about the associations. If there are additional results, a pagination token for the next page of results. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or Amazon Resource Name (ARN) of the service network. The ID or ARN of the service network. The ID or Amazon Resource Name (ARN) of the service network. The ID or ARN of the service network. The ID or Amazon Resource Name (ARN) of the VPC. The ID or ARN of the VPC. The maximum page size. If there are additional results, a pagination token for the next page of results. The ID of the service network associated with the VPC endpoint. Information about the association between the VPC endpoint and service network. If there are additional results, a pagination token for the next page of results. The ID or Amazon Resource Name (ARN) of the VPC. The ID or ARN of the VPC. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. The date and time that the listener was created, specified in ISO-8601 format. The date and time that the listener was created, in ISO-8601 format. The date and time that the listener was last updated, specified in ISO-8601 format. The date and time that the listener was last updated, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. The ID or ARN of the service network or service for which the policy is created. The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. The ID or ARN of the service network or service for which the policy is created. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. The targets that were successfully registered. The targets that were not registered. The Amazon Resource Name (ARN) of the resource. The DNS name of the resource. The IP resource. Describes a resource configuration. Indicates whether the resource configuration was created and is managed by Amazon. The Amazon Resource Name (ARN) of the resource configuration. The date and time that the resource configuration was created, in ISO-8601 format. The ID of the resource configuration. 
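The "maximum page size" and "pagination token for the next page of results" fields that recur throughout the list operations above follow the standard paging loop. The sketch below shows that loop in generic form; PageResult and listPage are hypothetical stand-ins for any real list request/response pair and are not SDK types.

    // Generic sketch of the max-results / next-token paging loop used by the
    // list operations described above. All names here are illustrative only.
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public final class PagingSketch {
        // Hypothetical response shape standing in for a real list response.
        static final class PageResult {
            final List<String> items;
            final String nextToken;
            PageResult(List<String> items, String nextToken) {
                this.items = items;
                this.nextToken = nextToken;
            }
        }

        // Placeholder for a real list call that would pass maxResults and the previous token.
        static PageResult listPage(String nextToken, int maxResults) {
            return new PageResult(Collections.<String>emptyList(), null);
        }

        public static void main(String[] args) {
            List<String> all = new ArrayList<>();
            String token = null;
            do {
                PageResult page = listPage(token, 50);   // pass the token from the previous response, if any
                all.addAll(page.items);
                token = page.nextToken;                  // a null (or absent) token means no more pages
            } while (token != null);
            System.out.println(all.size() + " items in total");
        }
    }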
The most recent date and time that the resource configuration was updated, in ISO-8601 format. The name of the resource configuration. The ID of the group resource configuration. The ID of the resource gateway. The status of the resource configuration. The type of resource configuration. Summary information about a resource configuration. The Amazon Resource Name (ARN) of the VPC endpoint association. The date and time that the VPC endpoint association was created, in ISO-8601 format. The account that created the association. The ID of the VPC endpoint association. The Amazon Resource Name (ARN) of the resource configuration. The ID of the resource configuration. The name of the resource configuration. The ID of the VPC endpoint. The owner of the VPC endpoint. Summary information about a VPC endpoint association. The targets that were successfully registered. The Amazon Resource Name (ARN) of the resource gateway. The targets that were not registered. The date and time that the VPC endpoint association was created, in ISO-8601 format. The ID of the resource gateway. The type of IP address used by the resource gateway. The most recent date and time that the resource gateway was updated, in ISO-8601 format. The name of the resource gateway. The IDs of the security groups applied to the resource gateway. The name of the resource gateway. The IDs of the VPC subnets for the resource gateway. The ID of the VPC for the resource gateway. Summary information about a resource gateway. The date and time that the listener rule was created, specified in ISO-8601 format. The date and time that the listener rule was created, in ISO-8601 format. The date and time that the listener rule was last updated, specified in ISO-8601 format. The date and time that the listener rule was last updated, in ISO-8601 format. The priority of the rule. Summary information about the listener rule. Summary information about a listener rule. The ID or Amazon Resource Name (ARN) of the rule. The ID or ARN of the rule. Describes a rule update. The ID or Amazon Resource Name (ARN) of the rule. The ID or ARN of the rule. Describes a rule update that failed. The date and time that the association was created, in ISO-8601 format. The ID of the association. The Amazon Resource Name (ARN) of the service network. The state of the association. The ID of the VPC endpoint associated with the service network. The owner of the VPC endpoint associated with the service network. The ID of the VPC for the association. Describes the association between a service network and a VPC endpoint. The Amazon Resource Name (ARN) of the association. The date and time that the association was created, in ISO-8601 format. The account that created the association. The DNS entry for the service. The failure code. The ID of the association between the service network and resource configuration. Specifies whether the association is managed by Amazon. The private DNS entry for the service. The Amazon Resource Name (ARN) of the association. The ID of the resource configuration associated with the service network. The name of the resource configuration associated with the service network. The Amazon Resource Name (ARN) of the service network associated with the resource configuration. The ID of the service network associated with the resource configuration. The name of the service network associated with the resource configuration. The status of the service network associated with the resource configuration. 
Summary information about an association between a service network and a resource configuration. The date and time that the association was created, specified in ISO-8601 format. The date and time that the association was created, in ISO-8601 format. The status. If the deletion fails, try to delete again. Summary information about the association between a service network and a service. Summary information about an association between a service network and a service. The date and time that the service network was created, specified in ISO-8601 format. The date and time that the service network was created, in ISO-8601 format. The date and time that the service network was last updated, specified in ISO-8601 format. The date and time that the service network was last updated, in ISO-8601 format. The name of the service network. The number of resource configurations associated with a service network. The number of services associated with the service network. The date and time that the association was created, specified in ISO-8601 format. The date and time that the association was created, in ISO-8601 format. The date and time that the association was last updated, specified in ISO-8601 format. The date and time that the association was last updated, in ISO-8601 format. Summary information about an association between a service network and a VPC. The date and time that the service was created, specified in ISO-8601 format. The date and time that the service was created, in ISO-8601 format. The date and time that the service was last updated. The format is ISO-8601. The date and time that the service was last updated, in ISO-8601 format. Summary information about a service. Specifies if the service network is enabled for sharing. Specifies if the service network should be enabled for sharing. The date and time that the target group was created, specified in ISO-8601 format. The date and time that the target group was created, in ISO-8601 format. The date and time that the target group was last updated, specified in ISO-8601 format. The date and time that the target group was last updated, in ISO-8601 format. The ID or Amazon Resource Name (ARN) of the access log subscription. The ID or ARN of the access log subscription. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. Indicates whether to add the resource configuration to service networks that are shared with other accounts. The TCP port ranges that a consumer can use to access a resource configuration. You can separate port ranges with a comma. Example: 1-65535 or 1,2,22-30 The resource configuration. The ID of the resource configuration. Indicates whether to add the resource configuration to service networks that are shared with other accounts. The Amazon Resource Name (ARN) of the resource configuration. The ID of the resource configuration. The name of the resource configuration. The TCP port ranges that a consumer can use to access a resource configuration. You can separate port ranges with a comma. Example: 1-65535 or 1,2,22-30 The TCP protocol accepted by the specified resource configuration. The resource configuration. The ID of the group resource configuration. The ID of the resource gateway associated with the resource configuration. The status of the resource configuration. The type of resource configuration. The ID or ARN of the resource gateway. 
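The maxResults / nextToken fields above describe the standard VPC Lattice pagination pattern for the new resource-configuration operations. Below is a minimal, hedged sketch of paging through resource configurations with the AWS SDK for Java 2.x; the listResourceConfigurations operation and its items / nextToken / arn members are assumed here to match the descriptions above and should be verified against the generated client.

```java
// Hypothetical sketch: paging through VPC Lattice resource configurations.
// Assumes the ListResourceConfigurations shapes (maxResults, nextToken, items, arn)
// follow the field descriptions above.
import software.amazon.awssdk.services.vpclattice.VpcLatticeClient;
import software.amazon.awssdk.services.vpclattice.model.ListResourceConfigurationsRequest;
import software.amazon.awssdk.services.vpclattice.model.ListResourceConfigurationsResponse;

public class ListResourceConfigurationsExample {
    public static void main(String[] args) {
        try (VpcLatticeClient lattice = VpcLatticeClient.create()) {
            String nextToken = null;
            do {
                ListResourceConfigurationsResponse page = lattice.listResourceConfigurations(
                        ListResourceConfigurationsRequest.builder()
                                .maxResults(50)          // the maximum page size
                                .nextToken(nextToken)    // pagination token from the previous page
                                .build());
                page.items().forEach(rc -> System.out.println(rc.arn()));
                nextToken = page.nextToken();            // null when there are no more results
            } while (nextToken != null);
        }
    }
}
```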
The IDs of the security groups associated with the resource gateway. The Amazon Resource Name (ARN) of the resource gateway. The ID of the resource gateway. The type of IP address used by the resource gateway. The name of the resource gateway. The IDs of the security groups associated with the resource gateway. The status of the resource gateway. The IDs of the VPC subnets for the resource gateway. The ID of the VPC for the resource gateway. The ID or Amazon Resource Name (ARN) of the listener. The ID or ARN of the listener. The ID or Amazon Resource Name (ARN) of the rule. The ID or ARN of the rule. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or Amazon Resource Name (ARN) of the service network. The ID or ARN of the service network. The ID or Amazon Resource Name (ARN) of the association. The ID or ARN of the association. The ID or Amazon Resource Name (ARN) of the service. The ID or ARN of the service. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. The ID or Amazon Resource Name (ARN) of the target group. The ID or ARN of the target group. Amazon VPC Lattice is a fully managed application networking service that you use to connect, secure, and monitor all of your services across multiple accounts and virtual private clouds (VPCs). Amazon VPC Lattice interconnects your microservices and legacy services within a logical boundary, so that you can discover and manage them more efficiently. For more information, see the Amazon VPC Lattice User Guide Creates a batch deletion job. A model evaluation job can only be deleted if it has the following status Deletes a batch of evaluation jobs. An evaluation job can only be deleted if it has the following status API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job, see Model evaluation. Creates an evaluation job. Retrieves the properties associated with a model evaluation job, including the status of the job. For more information, see Model evaluation. Gets information about an evaluation job, such as the status of the job. Lists model evaluation jobs. Lists all existing evaluation jobs. Stops an in-progress model evaluation job. Stops an evaluation job that is currently being created or running. Specifies the required elements for an automatic model evaluation job. Configuration details of the prompt datasets and metrics you want to use for your evaluation job. Contains the evaluator model configuration details. Use to specify an automatic model evaluation job. The The configuration details of an automated evaluation job. The The ARN of the model evaluation job being deleted. The ARN of the evaluation job being deleted. An HTTP status code of the model evaluation job being deleted. An HTTP status code of the evaluation job being deleted. A status message about the model evaluation job deletion. A status message about the evaluation job deletion. A JSON array that provides the status of the model evaluation jobs being deleted. A JSON array that provides the status of the evaluation jobs being deleted. The ARN of the model evaluation job to be deleted. The Amazon Resource Name (ARN) of the evaluation job for deletion. The status of the job's deletion. The status of the evaluation job for deletion. An array of model evaluation jobs to be deleted, and their associated statuses.
An evaluation job for deletion, and its current status. An array of model evaluation job ARNs to be deleted. A list of one or more evaluation job Amazon Resource Names (ARNs) you want to delete. A JSON object containing the HTTP status codes and the ARNs of model evaluation jobs that failed to be deleted. A JSON object containing the HTTP status codes and the ARNs of evaluation jobs that failed to be deleted. The list of model evaluation jobs to be deleted. The list of evaluation jobs for deletion. The Amazon Resource Name (ARN) of the evaluator model used in a knowledge base evaluation job or in a model evaluation job that uses a model as judge. The evaluator model used in a knowledge base evaluation job or in a model evaluation job that uses a model as judge. This model computes all evaluation-related metrics. The file name of the document contained in the wrapper object. The MIME type of the document contained in the wrapper object. The byte value of the file to upload, encoded as a Base-64 string. Contains the document contained in the wrapper object, along with its attributes/fields. The name of the model evaluation job. Model evaluation job names must be unique within your AWS account, and your account's AWS region. A name for the evaluation job. Names must be unique within your Amazon Web Services account, and your account's Amazon Web Services region. A description of the model evaluation job. A description of the evaluation job. The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can assume to perform tasks on your behalf. The service role must have Amazon Bedrock as the service principal, and provide access to any Amazon S3 buckets specified in the The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can assume to perform tasks on your behalf. To learn more about the required permissions, see Required permissions for model evaluations. Specify your customer managed key ARN that will be used to encrypt your model evaluation job. Specify your customer managed encryption key Amazon Resource Name (ARN) that will be used to encrypt your evaluation job. Tags to attach to the model evaluation job. Specifies whether the evaluation job is for evaluating a model or evaluating a knowledge base (retrieval and response generation). Specifies whether the model evaluation job is automatic or uses human workers. Contains the configuration details of either an automated or human-based evaluation job. Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model or inference profile, and model evaluation jobs that use human workers support two models or inference profiles. Contains the configuration details of the inference model for the evaluation job. For model evaluation jobs, automated jobs support a single model or inference profile, and jobs that use human workers support two models or inference profiles. An object that defines where the results of the model evaluation job will be saved in Amazon S3. Contains the configuration details of the Amazon S3 bucket for storing the results of the evaluation job. The ARN of the model evaluation job. The Amazon Resource Name (ARN) of the evaluation job. Each Amazon Bedrock model supports different inference parameters that change how the model behaves during inference. Contains the ARN of the Amazon Bedrock model or inference profile specified in your model evaluation job.
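The batch-delete fields above (a list of evaluation job ARNs in, and per-job statuses plus per-job errors out) map onto the new BatchDeleteEvaluationJob operation. A minimal, hedged sketch with the Bedrock client from the AWS SDK for Java 2.x follows; the member names (jobIdentifiers, evaluationJobs, errors) and the example ARN are assumptions taken from the descriptions here.

```java
// Hypothetical sketch: deleting a batch of Bedrock evaluation jobs by ARN.
// Member names (jobIdentifiers, evaluationJobs, errors) are assumed from the
// field descriptions above; verify them against the generated model classes.
import java.util.List;
import software.amazon.awssdk.services.bedrock.BedrockClient;
import software.amazon.awssdk.services.bedrock.model.BatchDeleteEvaluationJobRequest;
import software.amazon.awssdk.services.bedrock.model.BatchDeleteEvaluationJobResponse;

public class BatchDeleteEvaluationJobsExample {
    public static void main(String[] args) {
        try (BedrockClient bedrock = BedrockClient.create()) {
            BatchDeleteEvaluationJobResponse response = bedrock.batchDeleteEvaluationJob(
                    BatchDeleteEvaluationJobRequest.builder()
                            .jobIdentifiers(List.of(
                                    "arn:aws:bedrock:us-east-1:111122223333:evaluation-job/example-id")) // placeholder ARN
                            .build());
            // Jobs accepted for deletion, with their current status.
            response.evaluationJobs().forEach(job -> System.out.println(job));
            // Jobs that could not be deleted, with an HTTP status code and message.
            response.errors().forEach(err -> System.out.println(err));
        }
    }
}
```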
Each Amazon Bedrock model supports different The Contains the ARN of the Amazon Bedrock model or inference profile specified in your evaluation job. Each Amazon Bedrock model supports different The Used to specify an automated model evaluation job. See Contains the configuration details of an automated evaluation job that computes metrics. Used to specify a model evaluation job that uses human workers. See Contains the configuration details of an evaluation job that uses human workers. Used to specify either a The configuration details of either an automated or human-based evaluation job. The task type you want the model to carry out. The type of task you want to evaluate for your evaluation job. This applies only to model evaluation jobs and is ignored for knowledge base evaluation jobs. The names of the metrics used. For automated model evaluation jobs, valid values are The names of the metrics you want to use for your evaluation job. For knowledge base evaluation jobs that evaluate retrieval only, valid values are " For knowledge base evaluation jobs that evaluate retrieval with response generation, valid values are " For automated model evaluation jobs, valid values are " For human-based model evaluation jobs, the list of strings must match the Defines the built-in prompt datasets, built-in metric names and custom metric names, and the task type. Defines the prompt datasets, built-in metric names and custom metric names, and the task type. Used to specify the models. Specifies the inference models. Contains the configuration details of the inference for a knowledge base evaluation job, including either the retrieval only configuration or the retrieval with response generation configuration. Used to define the models you want used in your model evaluation job. Automated model evaluation jobs support only a single model. In a human-based model evaluation job, your annotator can compare the responses for up to two different models. The configuration details of the inference model for an evaluation job. For automated model evaluation jobs, only a single model is supported. For human-based model evaluation jobs, your annotator can compare the responses for up to two different models. The Amazon S3 URI where the results of the model evaluation job are saved. The Amazon S3 URI where the results of the evaluation job are saved. The Amazon S3 location where the results of your model evaluation job are saved. The Amazon S3 location where the results of your evaluation job are saved. The Amazon Resource Name (ARN) of the model evaluation job. The Amazon Resource Name (ARN) of the evaluation job. The name of the model evaluation job. The name for the evaluation job. The current status of the model evaluation job. The current status of the evaluation job. When the model evaluation job was created. The time the evaluation job was created. The type, either human or automatic, of model evaluation job. Specifies whether the evaluation job is automated or human-based. What task type was used in the model evaluation job. The type of task for model evaluation. The Amazon Resource Names (ARNs) of the model(s) used in the model evaluation job. The Amazon Resource Names (ARNs) of the model(s) used for the evaluation job. The Amazon Resource Names (ARNs) of the knowledge base resources used for a knowledge base evaluation job. The Amazon Resource Names (ARNs) of the models used to compute the metrics for a knowledge base evaluation job.
Specifies whether the evaluation job is for evaluating a model or evaluating a knowledge base (retrieval and response generation). A summary of the model evaluation job. Summary information of an evaluation job. The evaluator model used in a knowledge base evaluation job or in a model evaluation job that uses a model as judge. This model computes all evaluation-related metrics. Specifies the model configuration for the evaluator model. The source type of the external source wrapper object. The S3 location of the external source wrapper object. The identifier, content type, and data of the external source wrapper object. The unique external source of the content contained in the wrapper object. Contains the template for the prompt for the external source wrapper object. Configuration details for the guardrail. Configuration details for inference when using Additional model parameters and their corresponding values not included in the text inference configuration for an external source. Takes in custom model parameters specific to the language model being used. The response generation configuration of the external source wrapper object. The Amazon Resource Name (ARN) of the foundation model or inference profile used to generate responses. The document for the external source wrapper object in the Contains configuration details for response generation based on retrieved text chunks. The configuration of the external source wrapper object in the The name of the metadata attribute/field, which must match the name in your data source/document metadata. The value of the metadata attribute/field. Specifies the name of the metadata attribute/field to apply filters. You must match the name of the attribute/field in your data source/document metadata. Contains the template for the prompt that's sent to the model for response generation. Contains configuration details for the guardrail. Contains configuration details for inference for knowledge base retrieval and response generation. Additional model parameters and corresponding values not included in the The configuration details for response generation based on retrieved text chunks. The Amazon Resource Name (ARN) of the model evaluation job. The Amazon Resource Name (ARN) of the evaluation job you want to get information on. The name of the model evaluation job. The name for the evaluation job. The status of the model evaluation job. The current status of the evaluation job. The Amazon Resource Name (ARN) of the model evaluation job. The Amazon Resource Name (ARN) of the evaluation job. The description of the model evaluation job. The description of the evaluation job. The Amazon Resource Name (ARN) of the IAM service role used in the model evaluation job. The Amazon Resource Name (ARN) of the IAM service role used in the evaluation job. The Amazon Resource Name (ARN) of the customer managed key specified when the model evaluation job was created. The Amazon Resource Name (ARN) of the customer managed encryption key specified when the evaluation job was created. The type of model evaluation job. Specifies whether the evaluation job is automated or human-based. Specifies whether the evaluation job is for evaluating a model or evaluating a knowledge base (retrieval and response generation). Contains details about the type of model evaluation job, the metrics used, the task type selected, the datasets used, and any custom metrics you defined. Contains the configuration details of either an automated or human-based evaluation job.
Details about the models you specified in your model evaluation job. Contains the configuration details of the inference model used for the evaluation job. The Amazon S3 location where output data is saved. Contains the configuration details of the Amazon S3 bucket for storing the results of the evaluation job. When the model evaluation job was created. The time the evaluation job was created. When the model evaluation job was last modified. The time the evaluation job was last modified. An array of strings that specify why the model evaluation job has failed. A list of strings that specify why the evaluation job failed to create. The unique identifier for the guardrail. The version of the guardrail. The configuration details for the guardrail. Contains configuration details for text generation using a language model via the Contains configuration details of the inference for knowledge base retrieval and response generation. Contains configuration details for retrieving information from a knowledge base. Contains configuration details for retrieving information from a knowledge base and generating responses. The configuration details for retrieving information from a knowledge base and generating responses. Contains configuration details for returning the results from the vector search. Contains configuration details for retrieving information from a knowledge base. The unique identifier of the knowledge base. The Amazon Resource Name (ARN) of the foundation model or inference profile used to generate responses. Contains configuration details for retrieving text chunks. Contains configuration details for response generation based on retrieved text chunks. Contains configuration details for the model to process the prompt prior to retrieval and response generation. Contains configuration details for retrieving information from a knowledge base and generating responses. The number of text chunks to retrieve; the number of results to return. By default, Amazon Bedrock decides a search strategy for you. If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a Specifies the filters to use on the metadata fields in the knowledge base data sources before returning results. The configuration details for returning the results from the knowledge base vector search. A filter that includes model evaluation jobs created after the time specified. A filter to only list evaluation jobs created after a specified time. A filter that includes model evaluation jobs created prior to the time specified. A filter to only list evaluation jobs created before a specified time. Only return jobs where the status condition is met. A filter to only list evaluation jobs that are of a certain status. A filter to only list evaluation jobs that are either model evaluations or knowledge base evaluations. Query parameter string for model evaluation job names. A filter to only list evaluation jobs that contain a specified string in the job name. Allows you to sort model evaluation jobs by when they were created. Specifies a creation time to sort the list of evaluation jobs by when they were created. How you want the order of jobs sorted. Specifies whether to sort the list of evaluation jobs by either ascending or descending order. A summary of the model evaluation jobs. A list of summaries of the evaluation jobs. Contains configuration details for transforming the prompt.
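The ListEvaluationJobs filter and sort fields described above (creation-time bounds, status, name substring, sort key and order) can be combined in a single request. A hedged sketch with the AWS SDK for Java 2.x follows; the member names and the jobSummaries response member are assumed to match the descriptions here, and the enum string values shown are placeholders.

```java
// Hypothetical sketch: listing Bedrock evaluation jobs with the filters described above.
// Member and enum value names are assumed from the field descriptions; verify against the SDK.
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import software.amazon.awssdk.services.bedrock.BedrockClient;
import software.amazon.awssdk.services.bedrock.model.ListEvaluationJobsRequest;

public class ListEvaluationJobsExample {
    public static void main(String[] args) {
        try (BedrockClient bedrock = BedrockClient.create()) {
            bedrock.listEvaluationJobs(ListEvaluationJobsRequest.builder()
                            .creationTimeAfter(Instant.now().minus(7, ChronoUnit.DAYS)) // jobs created in the last week
                            .statusEquals("Completed")      // only jobs with this status (assumed enum value)
                            .nameContains("nightly")        // substring match on the job name
                            .sortBy("CreationTime")         // sort by creation time (assumed enum value)
                            .sortOrder("Descending")        // newest first (assumed enum value)
                            .build())
                    .jobSummaries()
                    .forEach(summary -> System.out.println(summary));
        }
    }
}
```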
The configuration details for the model to process the prompt prior to retrieval and response generation. The template for the prompt that's sent to the model for response generation. You can include prompt placeholders, which are replaced before the prompt is sent to the model to provide instructions and context to the model. In addition, you can include XML tags to delineate meaningful sections of the prompt template. For more information, see Knowledge base prompt template and Use XML tags with Anthropic Claude models. The template for the prompt that's sent to the model for response generation. The type of transformation to apply to the prompt. The configuration details for transforming the prompt. Contains configuration details for knowledge base retrieval and response generation. Contains configuration details for retrieval of information and response generation. Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value matches the value in this object. The following example would return data sources with an animal attribute whose value is 'cat': Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned. The following example would return data sources that don't contain an animal attribute whose value is 'cat': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than the value in this object. The following example would return data sources with a year attribute whose value is greater than '1989': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object. The following example would return data sources with a year attribute whose value is greater than or equal to '1989': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than the value in this object. The following example would return data sources with a year attribute whose value is less than '1989': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object. The following example would return data sources with a year attribute whose value is less than or equal to '1989': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object. The following example would return data sources with an animal attribute that is either 'cat' or 'dog': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object. The following example would return data sources whose animal attribute is neither 'cat' nor 'dog': Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value starts with the value in this object. This filter is currently only supported for Amazon OpenSearch Serverless vector stores. The following example would return data sources with an animal attribute that starts with 'ca' (for example, 'cat' or 'camel').
Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is a list that contains the value as one of its members. The following example would return data sources with an animals attribute that is a list containing a cat member (for example, Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is one of the following: A string that contains the value as a substring. The following example would return data sources with an animal attribute that contains the substring at (for example, 'cat'): A list with a member that contains the value as a substring. The following example would return data sources with an animals attribute that is a list containing a member that contains the substring at (for example, Knowledge base data sources are returned if their metadata attributes fulfill all the filter conditions inside this list. Knowledge base data sources are returned if their metadata attributes fulfill at least one of the filter conditions inside this list. Specifies the filters to use on the metadata attributes/fields in the knowledge base data sources before returning results. The type of resource that contains your data for retrieving information and generating responses. If you choose to use Contains configuration details for the knowledge base retrieval and response generation. The configuration for the external source wrapper object in the Contains configuration details for a knowledge base retrieval and response generation. The unique identifier of the knowledge base. Contains configuration details for knowledge base retrieval. The configuration details for retrieving information from a knowledge base. ARN of an IAM role. The S3 URI location for the wrapper object of the document. The unique wrapper object of the document from the S3 location. The ARN of the model evaluation job you want to stop. The Amazon Resource Name (ARN) of the evaluation job you want to stop. Controls the randomness of text generated by the language model, influencing how much the model sticks to the most predictable next words versus exploring more surprising options. A lower temperature value (e.g. 0.2 or 0.3) makes model outputs more deterministic or predictable, while a higher temperature (e.g. 0.8 or 0.9) makes the outputs more creative or unpredictable. A probability distribution threshold which controls what the model considers for the set of possible next tokens. The model will only consider the top p% of the probability distribution when generating the next token. The maximum number of tokens to generate in the output text. Do not use the minimum of 0 or the maximum of 65536. The limit values described here are arbitrary values; for actual values, consult the limits defined by your specific model. A list of sequences of characters that, if generated, will cause the model to stop generating further tokens. Do not use a minimum length of 1 or a maximum length of 1000. The limit values described here are arbitrary values; for actual values, consult the limits defined by your specific model. The configuration details for text generation using a language model via the The configuration of a virtual private cloud (VPC). For more information, see Protect your data using Amazon Virtual Private Cloud and Amazon Web Services PrivateLink. Describes the API operations for creating, managing, fine-tuning, and evaluating Amazon Bedrock models.
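The text-generation inference parameters described above (temperature, topP, maxTokens, stopSequences) are set together on one configuration object for knowledge base response generation. The sketch below is hedged: a TextInferenceConfig-style builder with exactly these four members is assumed from the descriptions here, and the numeric values are illustrative, not model limits.

```java
// Hypothetical sketch of the inference parameters described above.
// The TextInferenceConfig shape and member names are assumed; actual limits are model-specific.
import java.util.List;
import software.amazon.awssdk.services.bedrock.model.TextInferenceConfig;

public class TextInferenceConfigExample {
    public static void main(String[] args) {
        TextInferenceConfig inference = TextInferenceConfig.builder()
                .temperature(0.2f)                     // lower = more deterministic output
                .topP(0.9f)                            // only the top 90% of probability mass is considered
                .maxTokens(1024)                       // cap on generated tokens (illustrative value)
                .stopSequences(List.of("\n\nHuman:"))  // stop generating when this sequence appears (illustrative)
                .build();
        System.out.println(inference);
    }
}
```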
Get details for the specified image lifecycle policy. Verify the subscription and perform resource dependency checks on the requested Amazon Web Services Marketplace resource. For Amazon Web Services Marketplace components, the response contains fields to download the components and their artifacts. Describes the current status of the component. This is used for components that are no longer active. Describes the current status of the component. Indicates whether component source is hidden from view in the console, and from component detail results for API, CLI, or SDK operations. Contains product codes that are used for billing purposes for Amazon Web Services Marketplace components. A detailed view of a component. The date that the component was created. Describes the current status of the component version. Contains product codes that are used for billing purposes for Amazon Web Services Marketplace components. The defining characteristics of a specific version of an Amazon Web Services TOE component. Specifies which type of Amazon Web Services Marketplace resource Image Builder retrieves. The Amazon Resource Name (ARN) that uniquely identifies an Amazon Web Services Marketplace resource. The bucket path that you can specify to download the resource from Amazon S3. The Amazon Resource Name (ARN) for the Amazon Web Services Marketplace resource that was requested. The obfuscated S3 URL to download the component artifact from. Returns obfuscated data that contains the YAML content of the component. The name of the package as reported to the operating system package manager. The name of the package that's reported to the operating system package manager. The version of the package as reported to the operating system package manager. The version of the package that's reported to the operating system package manager. Represents a package installed on an Image Builder image. A software package that's installed on top of the base image to create a customized image. The maximum time in minutes that tests are permitted to run. The timeoutMinutes attribute is not currently active. This value is ignored. The maximum time in minutes that tests are permitted to run. The timeout attribute is not currently active. This value is ignored. Configure image tests for your pipeline build. Tests run after building the image, to verify that the AMI or container image is valid before distributing it. Logging configuration defines where Image Builder uploads your logs. For Amazon Web Services Marketplace components, this contains the product code ID that can be stamped onto an EC2 AMI to ensure that components are billed correctly. If this property is empty, it might mean that the component is not published. The owner of the product code that's billed. If this property is empty, it might mean that the component is not published. Information about a single product code. The name of the container repository where the output container image is stored. This name is prefixed by the repository location. The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, The container repository where the output container image is stored. Create a monitor for specific network flows between local and remote resources, so that you can monitor network performance for one or several of your workloads. 
For each monitor, Network Flow Monitor publishes detailed end-to-end performance metrics and a network health indicator (NHI) that informs you whether there were Amazon Web Services network issues for one or more of the network flows tracked by a monitor, during a time period that you choose. Create a scope of resources that you want to be available for Network Flow Monitor to generate metrics for, when you have active agents on those resources sending metrics reports to the Network Flow Monitor backend. This call returns a scope ID to identify the scope. When you create a scope, you enable permissions for Network Flow Monitor. The scope is set to the resources for the Amazon Web Services account that enables the feature. Deletes a monitor in Network Flow Monitor. Deletes a scope that has been defined. Gets information about a monitor in Network Flow Monitor based on a monitor name. The information returned includes the Amazon Resource Name (ARN), create time, modified time, resources included in the monitor, and status information. Return the data for a query with the Network Flow Monitor query interface. You specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor. Create a query ID for this call by calling the corresponding API call to start the query, Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Return the data for a query with the Network Flow Monitor query interface. You specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor. Create a query ID for this call by calling the corresponding API call to start the query, Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a scope ID. This query returns data for the top contributors for workload insights. Workload insights provide a high level view of network flow performance data collected by agents for a scope. Create a query ID for this call by calling the corresponding API call to start the query, Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. The top contributor network flows overall for a specific metric type, for example, the number of retransmissions. Returns the current status of a query for the Network Flow Monitor query interface, for a specified query ID and monitor. This call returns the query status for the top contributors for a monitor. When you start a query, use this call to check the status of the query to make sure that it has When you run a query, use this call to check the status of the query to make sure that the query has Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for workload insights.
When you start a query, use this call to check the status of the query to make sure that it has Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Returns the current status of a query for the Network Flow Monitor query interface, for a specified query ID and monitor. This call returns the query status for the top contributors data for workload insights. When you start a query, use this call to check the status of the query to make sure that it has Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. The top contributor network flows overall for a specific metric type, for example, the number of retransmissions. Gets information about a scope, including the name, status, tags, and target details. The scope in Network Flow Monitor is an account. List all monitors in an account. Optionally, you can list only monitors that have a specific status, by using the List all the scopes for an account. Returns all the tags for a resource. Start a query to return the data with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor. Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Start a query to return the data with the Network Flow Monitor query interface. Specify the query that you want to start by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor. Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a scope ID. This query returns data for the top contributors for workload insights. Workload insights provide a high level view of network flow performance data collected by agents for a scope. A query ID is returned from an API call to start a query of a specific type; for example Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. The top contributor network flows overall for a specific metric type, for example, the number of retransmissions. Stop a query with the Network Flow Monitor query interface. Specify the query that you want to stop by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor. Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Stop a query with the Network Flow Monitor query interface. Specify the query that you want to stop by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor. Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. Return the data for a query with the Network Flow Monitor query interface.
Specify the query that you want to return results for by providing a query ID and a scope ID. This query returns data for the top contributors for workload insights. Workload insights provide a high level view of network flow performance data collected by agents for a scope. Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor. The top contributor network flows overall for a specific metric type, for example, the number of retransmissions. Adds a tag to a resource. Removes a tag from a resource. Update a monitor to add or remove local or remote resources. Update a scope to add or remove resources that you want to be available for Network Flow Monitor to generate metrics for, when you have active agents on those resources sending metrics reports to the Network Flow Monitor backend. You don't have sufficient permission to perform this action. The requested resource is in use. The name of the monitor. The local resources to monitor. A local resource, in a bi-directional flow of a workload, is the host where the agent is installed. For example, if a workload consists of an interaction between a web service and a backend database (for example, Amazon Relational Database Service (RDS)), the EC2 instance hosting the web service, which also runs the agent, is the local resource. The remote resources to monitor. A remote resource is the other endpoint in the bi-directional flow of a workload, with a local resource. For example, Amazon Relational Database Service (RDS) can be a remote resource. The Amazon Resource Name (ARN) of the scope for the monitor. A unique, case-sensitive string of up to 64 ASCII characters that you specify to make an idempotent API request. Don't reuse the same client token for other API requests. The tags for a monitor. You can add a maximum of 200 tags. The Amazon Resource Name (ARN) of the monitor. The name of the monitor. The status of a monitor. The status can be one of the following The local resources to monitor. A local resource, in a bi-directional flow of a workload, is the host where the agent is installed. The remote resources to monitor. A remote resource is the other endpoint in the bi-directional flow of a workload, with a local resource. For example, Amazon Relational Database Service (RDS) can be a remote resource. The remote resource is identified by its ARN or an identifier. The date and time when the monitor was created. The last date and time that the monitor was modified. The tags for a monitor. The targets to define the scope to be monitored. Currently, a target is an Amazon Web Services account. A unique, case-sensitive string of up to 64 ASCII characters that you specify to make an idempotent API request. Don't reuse the same client token for other API requests. The tags for a scope. You can add a maximum of 200 tags. The identifier for the scope that includes the resources you want to get metrics for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The status for a call to create a scope. The status can be one of the following: The Amazon Resource Name (ARN) of the scope. The tags for a scope. The name of the monitor to delete. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The name of the monitor. 
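The query workflow described above (start a top-contributors query, poll its status, then page through the results) can be sketched with the Network Flow Monitor client from the AWS SDK for Java 2.x. This is a hedged sketch only: the client package, the operation names (startQueryMonitorTopContributors, getQueryStatusMonitorTopContributors, getQueryResultsMonitorTopContributors), and the member and enum values shown are all assumptions based on the descriptions here and should be checked against the generated SDK.

```java
// Hypothetical sketch of the monitor top-contributors query workflow described above.
// All operation, member, and enum names are assumed from the field descriptions.
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import software.amazon.awssdk.services.networkflowmonitor.NetworkFlowMonitorClient;
import software.amazon.awssdk.services.networkflowmonitor.model.*;

public class MonitorTopContributorsExample {
    public static void main(String[] args) throws InterruptedException {
        try (NetworkFlowMonitorClient nfm = NetworkFlowMonitorClient.create()) {
            String monitorName = "example-monitor";   // hypothetical monitor name

            // 1. Start a query for a time window, metric type, and destination category.
            String queryId = nfm.startQueryMonitorTopContributors(
                    StartQueryMonitorTopContributorsRequest.builder()
                            .monitorName(monitorName)
                            .startTime(Instant.now().minus(1, ChronoUnit.HOURS))
                            .endTime(Instant.now())
                            .metricName("RETRANSMISSIONS")       // assumed enum value
                            .destinationCategory("INTRA_AZ")     // assumed enum value
                            .build())
                    .queryId();

            // 2. Poll until the query has finished.
            String status;
            do {
                Thread.sleep(5_000);
                status = nfm.getQueryStatusMonitorTopContributors(
                        GetQueryStatusMonitorTopContributorsRequest.builder()
                                .monitorName(monitorName)
                                .queryId(queryId)
                                .build())
                        .statusAsString();
            } while ("QUEUED".equals(status) || "RUNNING".equals(status));  // assumed status values

            // 3. Page through the top-contributor rows using the nextToken pagination pattern.
            String nextToken = null;
            do {
                GetQueryResultsMonitorTopContributorsResponse page =
                        nfm.getQueryResultsMonitorTopContributors(
                                GetQueryResultsMonitorTopContributorsRequest.builder()
                                        .monitorName(monitorName)
                                        .queryId(queryId)
                                        .nextToken(nextToken)
                                        .build());
                page.topContributors().forEach(row -> System.out.println(row));
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}
```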
The Amazon Resource Name (ARN) of the monitor. The name of the monitor. The status of a monitor. The status can be one of the following The local resources for this monitor. The remote resources for this monitor. The date and time when the monitor was created. The date and time when the monitor was last modified. The tags for a monitor. The name of the monitor. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The token for the next set of results. You receive this token from a previous call. The number of query results that you want to return with this call. The units for a metric returned by the query. The top contributor network flows overall for a specific metric type, for example, the number of retransmissions. The token for the next set of results. You receive this token from a previous call. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The token for the next set of results. You receive this token from a previous call. The number of query results that you want to return with this call. The units for a metric returned by the query. The datapoints returned by the query. The token for the next set of results. You receive this token from a previous call. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The token for the next set of results. You receive this token from a previous call. The number of query results that you want to return with this call. The top contributor network flows overall for a specific metric type, for example, the number of retransmissions. The token for the next set of results. You receive this token from a previous call. The name of the monitor. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. When you run a query, use this call to check the status of the query to make sure that the query has The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. A scope ID is returned from a The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The status of a query for top contributors data. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. When you run a query, use this call to check the status of the query to make sure that the query has The identifier for the scope that includes the resources you want to get data results for. 
A scope ID is an internally-generated identifier that includes all the resources for a specific root account. A scope ID is returned from a The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. A scope ID is returned from a The status of a scope. The status can be one of the following: The Amazon Resource Name (ARN) of the scope. The targets for a scope. The tags for a scope. An internal error occurred. The service name for a local resource. The name of the pod for a local resource. The namespace of the pod for a local resource. The service name for a remote resource. The name of the pod for a remote resource. The namespace of the pod for a remote resource. Metadata about Kubernetes resources. The token for the next set of results. You receive this token from a previous call. The number of query results that you want to return with this call. The status of a monitor. The status can be one of the following The monitors that are in an account. The token for the next set of results. You receive this token from a previous call. The token for the next set of results. You receive this token from a previous call. The number of query results that you want to return with this call. The scopes returned by the call. The token for the next set of results. You receive this token from a previous call. The Amazon Resource Name (ARN) of the resource. The tags for a resource. The type of the local resource. Valid values are The identifier of the local resource, such as an ARN. A local resource is the host where the agent is installed. Local resources can be a subnet, a VPC, or an Availability Zone. The type of the remote resource. Valid values are The identifier of the remote resource, such as an ARN. A remote resource is the other endpoint in a network flow. That is, one endpoint is the local resource and the other is the remote resource. Remote resources can be a subnet, a VPC, an Availability Zone, or an Amazon Web Services service. The Amazon Resource Name (ARN) of the monitor. The name of the monitor. The status of a monitor. The status can be one of the following A summary of information about a monitor, including the ARN, the name, and the status. The IP address of the local resource for a top contributor network flow. The secure network address translation (SNAT) IP address for a top contributor network flow. The instance identifier for the local resource for a top contributor network flow. The VPC ID for a top contributor network flow for the local resource. The Amazon Web Services Region for the local resource for a top contributor network flow. The Availability Zone for the local resource for a top contributor network flow. The subnet ID for the local resource for a top contributor network flow. The target port. The destination category for a top contributors row. Destination categories can be one of the following: The VPC ID for a top contributor network flow for the remote resource. The Amazon Web Services Region for the remote resource for a top contributor network flow. The Availability Zone for the remote resource for a top contributor network flow. The subnet ID for the remote resource for a top contributor network flow. The instance identifier for the remote resource for a top contributor network flow. The IP address of the remote resource for a top contributor network flow.
The destination network address translation (DNAT) IP address for a top contributor network flow. The value of the metric for a top contributor network flow. The constructs traversed by a network flow. Metadata about Kubernetes resources. The Amazon Resource Name (ARN) of a local resource. The Amazon Resource Name (ARN) of a local subnet. The Amazon Resource Name (ARN) of a local VPC. The Amazon Resource Name (ARN) of a remote resource. The Amazon Resource Name (ARN) of a remote subnet. The Amazon Resource Name (ARN) of a remote VPC. A set of information for a top contributor network flow in a monitor. In a monitor, Network Flow Monitor returns information about the network flows for top contributors for each metric. Top contributors are network flows with the top values for each metric type. The request specifies a resource that doesn't exist. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The status of a scope. The status can be one of the following, depending on the state of scope creation: The Amazon Resource Name (ARN) of the scope. A summary of information about a scope, including the ARN, target ID, and Amazon Web Services Region. The request exceeded a service quota. The name of the monitor. The timestamp that is the date and time beginning of the period that you want to retrieve results for with your query. The timestamp that is the date and time end of the period that you want to retrieve results for with your query. The metric that you want to query top contributors for. That is, you can specify this metric to return the top contributor network flows, for this type of metric, for a monitor and (optionally) within a specific category, such as network flows between Availability Zones. The category that you want to query top contributors for, for a specific monitor. Destination categories can be one of the following: The maximum number of top contributors to return. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The timestamp that is the date and time beginning of the period that you want to retrieve results for with your query. The timestamp that is the date and time end of the period that you want to retrieve results for with your query. The metric that you want to query top contributors for. That is, you can specify this metric to return the top contributor network flows, for this type of metric, for a monitor and (optionally) within a specific category, such as network flows between Availability Zones. The destination category for top contributors. Destination categories can be one of the following: The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. A scope ID is returned from a The timestamp that is the date and time beginning of the period that you want to retrieve results for with your query.
The timestamp that is the date and time end of the period that you want to retrieve results for with your query. The metric that you want to query top contributors for. That is, you can specify this metric to return the top contributor network flows, for this type of metric, for a monitor and (optionally) within a specific category, such as network flows between Availability Zones. The destination category for a top contributors row. Destination categories can be one of the following: The maximum number of top contributors to return. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The name of the monitor. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query. The Amazon Resource Name (ARN) of the resource. The tags for a resource. The identifier for the account for a target. A target ID is an internally-generated identifier for a target. A target allows you to identify all the resources in a Network Flow Monitor scope. Currently, a target is always an Amazon Web Services account. The identifier for a target. The type of a target. A target type is currently always A target identifier is a pair of identifying information for a resource that is included in a target. A target identifier includes the target ID and the target type. A target identifier is a pair of identifying information for a resource that is included in a target. A target identifier includes the target ID and the target type. The Amazon Web Services Region where the target resource is located. A target resource in a scope. The resource is identified by a Region and a target identifier, which includes a target ID and a target type. The request was denied due to request throttling. The identifier for the traversed component. The type of component that was traversed. The Amazon Resource Name (ARN) of a traversed component. The service name for the traversed component. A section of the network that a network flow has traveled through. The Amazon Resource Name (ARN) of the resource. Keys that you specified when you tagged a resource. The name of the monitor. The local resources to add, as an array of resources with identifiers and types. The local resources to remove, as an array of resources with identifiers and types. The remote resources to add, as an array of resources with identifiers and types. The remote resources to remove, as an array of resources with identifiers and types. A unique, case-sensitive string of up to 64 ASCII characters that you specify to make an idempotent API request. Don't reuse the same client token for other API requests. The Amazon Resource Name (ARN) of the monitor. The name of the monitor. The status of a monitor.
The status can be one of the following The local resources updated for a monitor, as an array of resources with identifiers and types. The remote resources updated for a monitor, as an array of resources with identifiers and types. The date and time when the monitor was created. The last date and time that the monitor was modified. The tags for a monitor. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. A list of resources to add to a scope. A list of resources to delete from a scope. The identifier for the scope that includes the resources you want to get data results for. A scope ID is an internally-generated identifier that includes all the resources for a specific root account. The status for a call to update a scope. The status can be one of the following: The Amazon Resource Name (ARN) of the scope. The tags for a scope. Invalid request. An array of the timestamps for the data point. The values for the data point. The label identifying the data point. A data point for a top contributor network flow in a scope. Network Flow Monitor returns information about the network flows with the top values for each metric type, which are called top contributors. The account ID for a specific row of data. The subnet identifier for the local resource. The identifier for the Availability Zone where the local resource is located. The identifier for the VPC for the local resource. The Amazon Web Services Region where the local resource is located. The identifier of a remote resource. The value for a metric. The Amazon Resource Name (ARN) of a local subnet. The Amazon Resource Name (ARN) of a local VPC. A row for a top contributor for a scope. Network Flow Monitor is a feature of Amazon CloudWatch Network Monitoring that provides visibility into the performance of network flows for your Amazon Web Services workloads, between instances in subnets, as well as to and from Amazon Web Services. Lightweight agents that you install on the instances capture performance metrics for your network flows, such as packet loss and latency, and send them to the Network Flow Monitor backend. Then, you can view and analyze metrics from the top contributors for each metric type, to help troubleshoot issues. In addition, when you create a monitor, Network Flow Monitor provides a network health indicator (NHI) that informs you whether there were Amazon Web Services network issues for one or more of the network flows tracked by a monitor, during a time period that you choose. By using this value, you can independently determine if the Amazon Web Services network is impacting your workload during a specific time frame, to help you focus troubleshooting efforts. To learn more about Network Flow Monitor, see the Network Flow Monitor User Guide in the Amazon CloudWatch User Guide. Creates a connection. A connection defines the authorization type and credentials to use for authorization with an API destination HTTP endpoint. Creates a connection. A connection defines the authorization type and credentials to use for authorization with an API destination HTTP endpoint. For more information, see Connections for endpoint targets in the Amazon EventBridge User Guide. Sends custom events to Amazon EventBridge so that they can be matched to rules. The maximum size for a PutEvents event entry is 256 KB. 
Entry size is calculated including the event and any necessary characters and keys of the JSON representation of the event. To learn more, see Calculating PutEvents event entry size in the Amazon EventBridge User Guide PutEvents accepts the data in JSON format. For the JSON number (integer) data type, the constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of 9,223,372,036,854,775,807. PutEvents will only process nested JSON up to 1100 levels deep. Sends custom events to Amazon EventBridge so that they can be matched to rules. The maximum size for a PutEvents event entry is 256 KB. Entry size is calculated including the event and any necessary characters and keys of the JSON representation of the event. To learn more, see Calculating PutEvents event entry size in the Amazon EventBridge User Guide PutEvents accepts the data in JSON format. For the JSON number (integer) data type, the constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of 9,223,372,036,854,775,807. PutEvents will only process nested JSON up to 1000 levels deep. Running For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target. To enable multiple Amazon Web Services accounts to put events to your event bus, run If you grant permissions using an organization, then accounts in that organization must specify a The permission policy on the event bus cannot exceed 10 KB in size. Running For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target. To enable multiple Amazon Web Services accounts to put events to your event bus, run If you grant permissions using an organization, then accounts in that organization must specify a The permission policy on the event bus cannot exceed 10 KB in size. Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule. A single rule watches for events from a single event bus. Events generated by Amazon Web Services services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus. If you are updating an existing rule, the rule is replaced with what you specify in this When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect. A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule. When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. 
To use the If you are updating an existing rule, any tags you specify in the Most services in Amazon Web Services treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match. In EventBridge, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop. To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change. An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets. Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule. A single rule watches for events from a single event bus. Events generated by Amazon Web Services services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus. If you are updating an existing rule, the rule is replaced with what you specify in this When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect. A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule. When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the If you are updating an existing rule, any tags you specify in the Most services in Amazon Web Services treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match. In EventBridge, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop. To prevent this, write the rules so that the triggered actions do not re-fire the same rule. 
For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change. An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets. To create a rule that filters for management events from Amazon Web Services services, see Receiving read-only management events from Amazon Web Services services in the EventBridge User Guide. Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge (CloudWatch Events), rules and event buses can be tagged. Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge, rules and event buses can be tagged. Updates settings for a connection. You do not have the necessary permissons for this action. Additional parameters for the connection that are passed through with every invocation to the HTTP endpoint. For private OAuth authentication endpoints. The parameters EventBridge uses to authenticate against the endpoint. For more information, see Authorization methods for connections in the Amazon EventBridge User Guide . Contains the authorization parameters to use for the connection. Tthe authorization parameters to use for the connection. The user name to use for Basic authorization. Contains the authorization parameters for the connection if Basic is specified as the authorization type. The authorization parameters for the connection if Basic is specified as the authorization type. Specified whether the value is secret. Specifies whether the value is secret. Additional parameter included in the body. You can include up to 100 additional body parameters per request. An event payload cannot exceed 64 KB. Specified whether the value is a secret. Specifies whether the value is a secret. Additional parameter included in the header. You can include up to 100 additional header parameters per request. An event payload cannot exceed 64 KB. Contains additional header parameters for the connection. Any additional header parameters for the connection. Contains additional query string parameters for the connection. Any additional query string parameters for the connection. Contains additional body string parameters for the connection. Any additional body string parameters for the connection. Contains additional parameters for the connection. Any additional parameters for the connection. The client ID associated with the response to the connection request. Contains the client response parameters for the connection when OAuth is specified as the authorization type. The client response parameters for the connection when OAuth is specified as the authorization type. A Details about the client parameters returned when OAuth is specified as the authorization type. The additional HTTP parameters used for the OAuth authorization request. Contains the response parameters when OAuth is specified as the authorization type. The response parameters when OAuth is specified as the authorization type. Specifies whether the value is secret. Additional query string parameter for the connection. You can include up to 100 additional query string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Any additional query string parameter for the connection. You can include up to 100 additional query string parameters per request. 
Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. The Amazon Resource Name (ARN) of the resource configuration for the resource endpoint. The Amazon Resource Name (ARN) of the resource configuration for the resource endpoint. The parameters for EventBridge to use when invoking the resource endpoint. The parameters for EventBridge to use when invoking the resource endpoint. The value for the API key to use for authorization. Contains the API key authorization parameters for the connection. The API key authorization parameters for the connection. A The Basic authorization parameters to use for the connection. A The OAuth authorization parameters to use for the connection. A The API key authorization parameters to use for the connection. A The API key authorization parameters to use for the connection. Note that if you include additional parameters for the target of a rule via If you specify a private OAuth endpoint, the parameters for EventBridge to use when authenticating against the endpoint. For more information, see Authorization methods for connections in the Amazon EventBridge User Guide . Contains the authorization parameters for the connection. The authorization parameters for the connection. You must include only authorization parameters for the The client secret associated with the client ID to use for OAuth authorization for the connection. Contains the Basic authorization parameters to use for the connection. The Basic authorization parameters to use for the connection. A The client parameters for OAuth authorization. A Details about the additional parameters to use for the connection. Contains the OAuth authorization parameters to use for the connection. A The authorization parameters to use to authorize with the endpoint. You must include only authorization parameters for the For connections to private resource endpoints, the parameters to use for invoking the resource endpoint. For more information, see Connecting to private resources in the Amazon EventBridge User Guide . The ARN of the SQS queue specified as the target for the dead-letter queue. Configuration details of the Amazon SQS queue for EventBridge to use as a dead-letter queue (DLQ). For more information, see Event retry policy and using dead-letter queues in the EventBridge User Guide. Configuration details of the Amazon SQS queue for EventBridge to use as a dead-letter queue (DLQ). For more information, see Using dead-letter queues to process undelivered events in the EventBridge User Guide. The parameters for EventBridge to use when invoking the resource endpoint. If the connection uses a private OAuth endpoint, the parameters for EventBridge to use when authenticating against the endpoint. For more information, see Authorization methods for connections in the Amazon EventBridge User Guide . The Amazon Resource Name (ARN) of the resource configuration for the private API. For connections to private APIs, the Amazon Resource Name (ARN) of the resource association EventBridge created between the connection and the private API's resource configuration. The parameters for EventBridge to use when invoking the resource endpoint. The description for the connection retrieved. For connections to private resource endpoints. The parameters EventBridge uses to invoke the resource endpoint. For more information, see Connecting to private resources in the Amazon EventBridge User Guide . The state of the connection retrieved. 
The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an An array of An array that includes information about each API destination. A token you can use in a subsequent request to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an A token you can use in a subsequent request to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an If The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an If A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an A token you can use in a subsequent operation to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an A token you can use in a subsequent operation to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to this operation. Specifying this retrieves the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an A token you can use in a subsequent operation to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to this operation. Specifying this retrieves the next set of results. 
The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an A token you can use in a subsequent operation to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an Indicates whether there are additional results to retrieve. If there are no more results, the value is null. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an Indicates whether there are additional results to retrieve. If there are no more results, the value is null. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The token returned by a previous call to retrieve the next set of results. The token returned by a previous call, which you can use to retrieve the next set of results. The value of Using an expired pagination token results in an Indicates whether there are additional results to retrieve. If there are no more results, the value is null. A token indicating there are more results available. If there are no more results, no token is included in the response. The value of Using an expired pagination token results in an The The retry policy configuration to use for the dead-letter queue. This request cannot be completed due to throttling issues. The value associated with teh API key to use for authorization. The value associated with the API key to use for authorization. Contains the API key authorization parameters to use to update the connection. A The authorization parameters for Basic authorization. A The authorization parameters for OAuth authorization. A The authorization parameters for API key authorization. A The additional parameters to use for the connection. If you specify a private OAuth endpoint, the parameters for EventBridge to use when authenticating against the endpoint. For more information, see Authorization methods for connections in the Amazon EventBridge User Guide . Contains the additional parameters to use for the connection. The password associated with the user name to use for Basic authorization. Contains the Basic authorization parameters for the connection. The Basic authorization parameters for the connection. 
The client secret assciated with the client ID to use for OAuth authorization. Contains the OAuth authorization parameters to use for the connection. The OAuth authorization parameters to use for the connection. A The client parameters to use for the connection when OAuth is specified as the authorization type. The additional HTTP parameters used for the OAuth authorization request. Contains the OAuth request parameters to use for the connection. The OAuth request parameters to use for the connection. The authorization parameters to use for the connection. For connections to private resource endpoints, the parameters to use for invoking the resource endpoint. For more information, see Connecting to private resources in the Amazon EventBridge User Guide . Creates outbound requests for the specified campaign Amazon Connect account. This API is idempotent. Takes in a list of profile outbound requests to be placed as part of an outbound campaign. This API is idempotent. Server-side encryption type. Event trigger of the campaign Enumeration of Customer Profiles event type Failure details for a profile outbound request List of failed profile outbound requests Predictive config Identifier of the customer profile Information about a profile outbound request Predefined code indicating the error that caused the failure Identifier of the profile outbound request List of profile outbound requests The response for PutOutboundRequestBatch API. The request for PutProfileOutboundRequestBatch API The response for PutProfileOutboundRequestBatch API Source of the campaign Success details for a profile outbound request List of successful profile outbound requests Creates a cluster. All nodes in the cluster run the same protocol-compliant engine software. Creates a new multi-Region cluster. Deletes a cluster. It also deletes all associated nodes and node endpoints Deletes a cluster. It also deletes all associated nodes and node endpoints. Deletes an existing multi-Region cluster. Returns a list of ACLs Returns a list of ACLs. Returns a list of the available engine versions. Returns a list of the available Redis OSS engine versions. Returns events related to clusters, security groups, and parameter groups. You can obtain events specific to a particular cluster, security group, or parameter group by providing the name as a parameter. By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary. Returns details about one or more multi-Region clusters. Returns details of the service updates Returns details of the service updates. Used to failover a shard. This API is designed for testing the behavior of your application in case of MemoryDB failover. It is not designed to be used as a production-level tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large scale operational events, Amazon may block this API. Lists the allowed updates for a multi-Region cluster. Lists all tags currently on a named resource. A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track your MemoryDB resources. For more information, see Tagging your MemoryDB resources Lists all tags currently on a named resource. A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track your MemoryDB resources. For more information, see Tagging your MemoryDB resources. 
A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your MemoryDB resources. When you add or remove tags on clusters, those actions will be replicated to all nodes in the cluster. For more information, see Resource-level permissions. For example, you can use cost-allocation tags to your MemoryDB resources, Amazon generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services. For more information, see Using Cost Allocation Tags. Use this operation to remove tags on a resource Use this operation to remove tags on a resource. Modifies the settings for a cluster. You can use this operation to change one or more cluster configuration settings by specifying the settings and the new values. Updates the configuration of an existing multi-Region cluster. A group of settings that are currently being applied. The name of the multi-Region cluster that this cluster belongs to. The number of shards in the cluster The Redis OSS or Valkey engine used by the cluster. The name of the engine used by the cluster. The Redis engine version used by the cluster The Redis OSS engine version used by the cluster The engine patch version used by the cluster The Redis OSS engine patch version used by the cluster The configuration for the Redis OSS or Valkey engine used by the cluster. The name of the engine used by the cluster configuration. The engine version used by the cluster The Redis OSS engine version used by the cluster The list of shards in the cluster The name of the multi-Region parameter group associated with the cluster configuration. The name for the multi-Region cluster associated with the cluster configuration. A list of cluster configuration options. The compute and memory capacity of the nodes in the cluster. The name of the multi-Region cluster to be created. The name of the parameter group associated with the cluster. The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey. The name of the engine to be used for the cluster. The version number of the engine to be used for the cluster. The version number of the Redis OSS engine to be used for the cluster. A suffix to be added to the multi-Region cluster name. A description for the multi-Region cluster. The name of the engine to be used for the multi-Region cluster. The version of the engine to be used for the multi-Region cluster. The node type to be used for the multi-Region cluster. The name of the multi-Region parameter group to be associated with the cluster. The number of shards for the multi-Region cluster. Whether to enable TLS encryption for the multi-Region cluster. A list of tags to be applied to the multi-Region cluster. Details about the newly created multi-Region cluster. The newly-created subnet group The newly-created subnet group. The name of the Access Control List to delete The name of the Access Control List to delete. The name of the cluster to be deleted The name of the multi-Region cluster to be deleted. The user-supplied name of a final cluster snapshot. This is the unique name that identifies the snapshot. MemoryDB creates the snapshot, and then deletes the cluster immediately afterward. The cluster object that has been deleted The cluster object that has been deleted. 
The name of the multi-Region cluster to be deleted. Details about the deleted multi-Region cluster. The name of the snapshot to delete The name of the snapshot to delete. The name of the subnet group to delete The name of the subnet group to delete. The name of the ACL The name of the ACL. The list of ACLs The list of ACLs. The name of the cluster The name of the cluster. The engine version to return. Valid values are either valkey or redis. The name of the engine for which to list available versions. The engine version. The Redis OSS engine version The name of a specific multi-Region cluster to describe. The maximum number of results to return. A token to specify where to start paginating. Details about the multi-Region cluster. A token to use to retrieve the next page of results. A list of multi-Region clusters. The list of cluster names to identify service updates to apply The list of cluster names to identify service updates to apply. The status(es) of the service updates to filter on The status(es) of the service updates to filter on. The name of the user The name of the user. The version of the Redis OSS or Valkey engine used by the cluster. The name of the engine for which version information is provided. Specifies the name of the parameter group family to which the engine default parameters apply. Provides details of the engine version. Provides details of the Redis OSS engine version The cluster being failed over The cluster being failed over. The name of the shard The name of the shard. The cluster being failed over The cluster being failed over. The requested operation cannot be performed on the multi-Region cluster in its current state. The name of the multi-Region cluster. The node types that the cluster can be scaled up to. The node types that the cluster can be scaled down to. The Amazon Resource Name (ARN) of the resource for which you want the list of tags The Amazon Resource Name (ARN) of the resource for which you want the list of tags. The name of the multi-Region cluster. The description of the multi-Region cluster. The current status of the multi-Region cluster. The node type used by the multi-Region cluster. The name of the engine used by the multi-Region cluster. The version of the engine used by the multi-Region cluster. The number of shards in the multi-Region cluster. The clusters in this multi-Region cluster. The name of the multi-Region parameter group associated with the cluster. Indiciates if the multi-Region cluster is TLS enabled. The Amazon Resource Name (ARN) of the multi-Region cluster. Represents a multi-Region cluster. A multi-Region cluster with the specified name already exists. The specified multi-Region cluster does not exist. The specified multi-Region parameter group does not exist. The name of the Regional cluster The Region the current Regional cluster is assigned to. The status of the Regional cluster. The Amazon Resource Name (ARN) the Regional cluster Represents a Regional cluster The MemoryDB engine to which the update applies. The values are either Redis or Valkey. The name of the engine for which a service update is available. The Amazon Resource Name (ARN) of the resource to which the tags are to be added The Amazon Resource Name (ARN) of the resource to which the tags are to be added. The Amazon Resource Name (ARN) of the resource to which the tags are to be removed The Amazon Resource Name (ARN) of the resource to which the tags are to be removed. 
The list of keys of the tags that are to be removed The list of keys of the tags that are to be removed. The list of tags removed The list of tags removed. The name of the Access Control List The name of the Access Control List. The list of users to add to the Access Control List The list of users to add to the Access Control List. The list of users to remove from the Access Control List The list of users to remove from the Access Control List. The updated Access Control List The updated Access Control List. The name of the cluster to update The name of the cluster to update. The description of the cluster to update The description of the cluster to update. The SecurityGroupIds to update The SecurityGroupIds to update. The SNS topic ARN to update The SNS topic ARN to update. The name of the parameter group to update The name of the parameter group to update. The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey. The name of the engine to be used for the cluster. The number of replicas that will reside in each shard The number of replicas that will reside in each shard. The number of shards in the cluster The number of shards in the cluster. The Access Control List that is associated with the cluster The Access Control List that is associated with the cluster. The updated cluster The updated cluster. The name of the multi-Region cluster to be updated. The new node type to be used for the multi-Region cluster. A new description for the multi-Region cluster. The new engine version to be used for the multi-Region cluster. The new multi-Region parameter group to be associated with the cluster. Whether to force the update even if it may cause data loss. The status of updating the multi-Region cluster. MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands. MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands. Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles. Each event stream can be associated with only one Kinesis Data Stream destination in the same region and Amazon Web Services account as the customer profiles domain Creates an event trigger, which specifies the rules when to perform action based on customer's ingested data. Each event stream can be associated with only one integration in the same region and AWS account as the event stream. Disables and deletes the specified event stream. Disable and deletes the Event Trigger. You cannot delete an Event Trigger with an active Integration associated. Returns information about the specified event stream in a specific domain. Get a specific Event Trigger from the domain. 
Returns a list of all the event streams in a specific domain. List all Event Triggers under a domain. Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key. After a domain is created, the name can’t be changed. Use this API or CreateDomain to enable identity resolution: set To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply. To add or remove tags on an existing Domain, see TagResource/UntagResource. Update the properties of an Event Trigger. The unique name of the domain. The unique name of the event trigger. The unique name of the object type. The description of the event trigger. A list of conditions that determine when an event should trigger the destination. The destination is triggered only for profiles that meet the criteria of a segment definition. Defines limits controlling whether an event triggers the destination, based on ingestion latency and the number of invocations per profile over specific time periods. An array of key-value pairs to apply to this resource. The unique name of the event trigger. The unique name of the object type. The description of the event trigger. A list of conditions that determine when an event should trigger the destination. The destination is triggered only for profiles that meet the criteria of a segment definition. Defines limits controlling whether an event triggers the destination, based on ingestion latency and the number of invocations per profile over specific time periods. The timestamp of when the event trigger was created. The timestamp of when the event trigger was most recently updated. An array of key-value pairs to apply to this resource. The unique name of the domain. The unique name of the event trigger. A message that indicates the delete request is done. A list of dimensions to be evaluated for the event. The operator used to combine multiple dimensions. Specifies the circumstances under which the event should trigger the destination. A list of object attributes to be evaluated. A specific event dimension to be assessed. In milliseconds. Specifies that an event will only trigger the destination if it is processed within a certain latency period. A list of time periods during which the limits apply. Defines limits controlling whether an event triggers the destination, based on ingestion latency and the number of invocations per profile over specific time periods. The unique name of the object type. The unique name of the event trigger. The description of the event trigger. The timestamp of when the event trigger was created. The timestamp of when the event trigger was most recently updated. An array of key-value pairs to apply to this resource. The summary of the event trigger. The unique name of the domain. The unique name of the event trigger. The unique name of the event trigger. The unique name of the object type. The description of the event trigger. A list of conditions that determine when an event should trigger the destination. The destination is triggered only for profiles that meet the criteria of a segment definition. Defines limits controlling whether an event triggers the destination, based on ingestion latency and the number of invocations per profile over specific time periods. The timestamp of when the event trigger was created. The timestamp of when the event trigger was most recently updated. An array of key-value pairs to apply to this resource. 
The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. A list of unique names for active event triggers associated with the integration. This list would be empty if no Event Trigger is associated with the integration. The unique name of the domain. The pagination token to use with ListEventTriggers. The maximum number of results to return per page. The list of Event Triggers. The pagination token from the previous call to ListEventTriggers. The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. A list of unique names for active event triggers associated with the integration. An integration in list of integrations. An attribute contained within a source object. A field defined within an object type. The operator used to compare an attribute against a list of values. A list of attribute values used for comparison. The criteria that a specific object attribute must meet to trigger the destination. The unit of time. The amount of time of the specified unit. The maximum allowed number of destination invocations per profile. If set to true, there is no limit on the number of destination invocations per profile. The default is false. Defines a limit and the time period during which it is enforced. The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. A list of unique names for active event triggers associated with the integration. The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. A list of unique names for active event triggers associated with the integration. This list would be empty if no Event Trigger is associated with the integration. The unique name of the domain. The unique name of the event trigger. The unique name of the object type. The description of the event trigger. A list of conditions that determine when an event should trigger the destination. The destination is triggered only for profiles that meet the criteria of a segment definition. Defines limits controlling whether an event triggers the destination, based on ingestion latency and the number of invocations per profile over specific time periods. The unique name of the event trigger. The unique name of the object type. The description of the event trigger. A list of conditions that determine when an event should trigger the destination. The destination is triggered only for profiles that meet the criteria of a segment definition. Defines limits controlling whether an event triggers the destination, based on ingestion latency and the number of invocations per profile over specific time periods. The timestamp of when the event trigger was created. The timestamp of when the event trigger was most recently updated. An array of key-value pairs to apply to this resource. This gets the invoice profile associated with a set of accounts. The accounts must be linked accounts under the requester management account organization. This creates a new invoice unit with the provided definition. This deletes an invoice unit with the provided invoice unit ARN. This retrieves the invoice unit definition. This fetches a list of all invoice unit definitions for a given account, as of the provided Lists the tags for a resource. Adds a tag to a resource. Removes a tag from a resource. 
You can update the invoice unit configuration at any time, and Amazon Web Services will use the latest configuration at the end of the month. You don't have sufficient access to perform this action. You don't have sufficient access to perform this action. Retrieves the corresponding invoice profile data for these account IDs. A list of invoice profiles corresponding to the requested accounts. The unique name of the invoice unit that is shown on the generated invoice. This can't be changed once it is set. To change this name, you must delete the invoice unit recreate. The Amazon Web Services account ID chosen to be the receiver of an invoice unit. All invoices generated for that invoice unit will be sent to this account ID. The invoice unit's description. This can be changed at a later time. Whether the invoice unit based tax inheritance is/ should be enabled or disabled. The The tag structure that contains a tag key and value. The ARN to identify an invoice unit. This information can't be modified or deleted. The ARN to identify an invoice unit. This information can't be modified or deleted. The ARN to identify an invoice unit. This information can't be modified or deleted. An optional input to the list API. You can specify a list of invoice unit names inside filters to return invoice units that match only the specified invoice unit names. If multiple names are provided, the result is an You can specify a list of Amazon Web Services account IDs inside filters to return invoice units that match only the specified accounts. If multiple accounts are provided, the result is an You can specify a list of Amazon Web Services account IDs inside filters to return invoice units that match only the specified accounts. If multiple accounts are provided, the result is an An optional input to the list API. If multiple filters are specified, the returned list will be a configuration that match all of the provided filters. Supported filter types are The ARN to identify an invoice unit. This information can't be modified or deleted. The state of an invoice unit at a specified time. You can see legacy invoice units that are currently deleted if the The ARN to identify an invoice unit. This information can't be modified or deleted. The Amazon Web Services account ID chosen to be the receiver of an invoice unit. All invoices generated for that invoice unit will be sent to this account ID. The unique name of the invoice unit that is shown on the generated invoice. The assigned description for an invoice unit. Whether the invoice unit based tax inheritance is/ should be enabled or disabled. The most recent date the invoice unit response was updated. The processing request failed because of an unknown error, exception, or failure. The processing request failed because of an unknown error, exception, or failure. The account ID the invoice profile is generated for. The name of the person receiving the invoice profile. The address of the receiver that will be printed on the invoice. The email address for the invoice profile receiver. This specifies the issuing entity of the invoice. Your Tax Registration Number (TRN) information. Contains high-level information about the invoice receiver. ARN to identify an invoice unit. This information can't be modified or deleted. The account that receives invoices related to the invoice unit. A unique name that is distinctive within your Amazon Web Services. The assigned description for an invoice unit. This information can't be modified or deleted. 
Whether the invoice unit based tax inheritance is/ should be enabled or disabled. An The last time the invoice unit was updated. This is important to determine the version of invoice unit configuration used to create the invoices. Any invoice created after this modified time will use this invoice unit configuration. An invoice unit is a set of mutually exclusive accounts that correspond to your business entity. Invoice units allow you separate Amazon Web Services account costs and configures your invoice for each business entity going forward.
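The PutEvents limits summarized above (256 KB per entry, counted over the event plus the keys and characters of its JSON representation) are easiest to see against a concrete call. A minimal sketch with the AWS SDK for Java 2.x EventBridge client; the bus name, source, and detail payload are placeholders:

import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
import software.amazon.awssdk.services.eventbridge.model.PutEventsRequest;
import software.amazon.awssdk.services.eventbridge.model.PutEventsRequestEntry;
import software.amazon.awssdk.services.eventbridge.model.PutEventsResponse;

public class PutEventsSketch {
    public static void main(String[] args) {
        try (EventBridgeClient events = EventBridgeClient.create()) {
            // One entry; its detail, detail-type, source, and any resources or time
            // fields all count toward the 256 KB entry-size limit.
            PutEventsRequestEntry entry = PutEventsRequestEntry.builder()
                    .eventBusName("default")                        // placeholder bus
                    .source("com.example.orders")                   // placeholder source
                    .detailType("OrderPlaced")
                    .detail("{\"orderId\":\"1234\",\"total\":42}")  // JSON payload
                    .build();

            PutEventsResponse response = events.putEvents(
                    PutEventsRequest.builder().entries(entry).build());

            // Failures are reported per entry rather than failing the whole call.
            response.entries().forEach(result -> {
                if (result.errorCode() != null) {
                    System.err.println("Failed: " + result.errorCode() + " " + result.errorMessage());
                } else {
                    System.out.println("Event ID: " + result.eventId());
                }
            });
        }
    }
}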
"
+ "documentation":"
"
},
"Tags":{
"shape":"Tags",
@@ -1838,7 +1838,7 @@
"members":{
"PolicyType":{
"shape":"EffectivePolicyType",
- "documentation":"
"
+ "documentation":"
"
}
}
},
@@ -2067,7 +2067,8 @@
"TAG_POLICY",
"BACKUP_POLICY",
"AISERVICES_OPT_OUT_POLICY",
- "CHATBOT_POLICY"
+ "CHATBOT_POLICY",
+ "DECLARATIVE_POLICY_EC2"
]
},
"Email":{
@@ -2114,7 +2115,7 @@
},
"PolicyType":{
"shape":"PolicyType",
- "documentation":"
"
+ "documentation":"
"
}
}
},
@@ -2790,7 +2791,7 @@
},
"Filter":{
"shape":"PolicyType",
- "documentation":"
"
+ "documentation":"
"
},
"NextToken":{
"shape":"NextToken",
@@ -2821,7 +2822,7 @@
"members":{
"Filter":{
"shape":"PolicyType",
- "documentation":"
"
+ "documentation":"
"
},
"NextToken":{
"shape":"NextToken",
@@ -3271,7 +3272,8 @@
"TAG_POLICY",
"BACKUP_POLICY",
"AISERVICES_OPT_OUT_POLICY",
- "CHATBOT_POLICY"
+ "CHATBOT_POLICY",
+ "DECLARATIVE_POLICY_EC2"
]
},
"PolicyTypeAlreadyEnabledException":{
From b21fed1d6251e20cc60b1a3437899791a06703a2 Mon Sep 17 00:00:00 2001
From: AWS <>
Date: Mon, 2 Dec 2024 03:48:03 +0000
Subject: [PATCH 06/35] AWS Transfer Family Update: AWS Transfer Family now
offers Web apps that enable simple and secure access to data stored in
Amazon S3.
---
.../feature-AWSTransferFamily-5c2b94e.json | 6 +
.../codegen-resources/paginators-1.json | 6 +
.../codegen-resources/service-2.json | 573 +++++++++++++++++-
3 files changed, 576 insertions(+), 9 deletions(-)
create mode 100644 .changes/next-release/feature-AWSTransferFamily-5c2b94e.json
diff --git a/.changes/next-release/feature-AWSTransferFamily-5c2b94e.json b/.changes/next-release/feature-AWSTransferFamily-5c2b94e.json
new file mode 100644
index 00000000000..bf7b01b7939
--- /dev/null
+++ b/.changes/next-release/feature-AWSTransferFamily-5c2b94e.json
@@ -0,0 +1,6 @@
+{
+ "type": "feature",
+ "category": "AWS Transfer Family",
+ "contributor": "",
+ "description": "AWS Transfer Family now offers Web apps that enables simple and secure access to data stored in Amazon S3."
+}
diff --git a/services/transfer/src/main/resources/codegen-resources/paginators-1.json b/services/transfer/src/main/resources/codegen-resources/paginators-1.json
index 335034d4b60..1653221018a 100644
--- a/services/transfer/src/main/resources/codegen-resources/paginators-1.json
+++ b/services/transfer/src/main/resources/codegen-resources/paginators-1.json
@@ -66,6 +66,12 @@
"limit_key": "MaxResults",
"result_key": "Users"
},
+ "ListWebApps": {
+ "input_token": "NextToken",
+ "output_token": "NextToken",
+ "limit_key": "MaxResults",
+ "result_key": "WebApps"
+ },
"ListWorkflows": {
"input_token": "NextToken",
"output_token": "NextToken",
diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json
index fb236196fe1..85036600deb 100644
--- a/services/transfer/src/main/resources/codegen-resources/service-2.json
+++ b/services/transfer/src/main/resources/codegen-resources/service-2.json
@@ -123,6 +123,23 @@
"documentation":"IdentityProviderType
set to SERVICE_MANAGED
. Using parameters for CreateUser
, you can specify the user name, set the home directory, store the user's public key, and assign the user's Identity and Access Management (IAM) role. You can also optionally add a session policy, and assign metadata with tags that can be used to group and search for users.WebAppCustomization
object that corresponds to the web app ID specified.ServerId
property.ServerId
value that was specified.WebAppId
.WebAppId
.ServerId
parameter.UserName
and ServerId
you specify.ServerId
and the UserName
for the updated user.HomeDirectoryType
to LOGICAL
, and specify HomeDirectoryMappings
with Entry
as root (/
) and Target
as their home directory./test/admin-user
, the following command updates the user so that their configuration in the console shows the Restricted flag as selected. aws transfer update-user --server-id <server-id> --user-name admin-user --home-directory-type LOGICAL --home-directory-mappings \"[{\\\"Entry\\\":\\\"/\\\", \\\"Target\\\":\\\"/test/admin-user\\\"}]\"
IdentityProviderType
is set to AWS_DIRECTORY_SERVICE
, Amazon Web Services_LAMBDA
or API_GATEWAY
. Accepts an array containing all of the information required to use a directory in AWS_DIRECTORY_SERVICE
or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when IdentityProviderType
is set to SERVICE_MANAGED
.IdentityProviderType
is set to AWS_DIRECTORY_SERVICE
, Amazon Web Services_LAMBDA
or API_GATEWAY
. Accepts an array containing all of the information required to use a directory in AWS_DIRECTORY_SERVICE
or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when IdentityProviderType
is set to SERVICE_MANAGED
.AccessEndpoint
is the URL that you provide to your users for them to interact with the Transfer Family web app. You can specify a custom URL or use the default value.DescribeWebApp
.AccessEndpoint
is the URL that you provide to your users for them to interact with the Transfer Family web app. You can specify a custom URL or use the default value.WebAppEndpoint
is the unique URL for your Transfer Family web app. This is the value that you use when you configure Origins on CloudFront.WebAppId
.NextToken
parameter in the output. You can then pass the NextToken
parameter in a subsequent command to continue listing additional web apps.NextToken
parameter in a subsequent command to continue listing additional web apps.AccessEndpoint
is the URL that you provide to your users for them to interact with the Transfer Family web app. You can specify a custom URL or use the default value.WebAppEndpoint
is the unique URL for your Transfer Family web app. This is the value that you use when you configure Origins on CloudFront.UpdateUserResponse
returns the user name and identifier for the request to update a user's properties.UpdateWebAppIdentityCenterConfig
object.WebAppIdentityProviderDetails
object.AccessEndpoint
is the URL that you provide to your users for them to interact with the Transfer Family web app. You can specify a custom URL or use the default value.IdentityCenterConfig
object.1
sets the number of concurrent sessions to 250; 2
sets a value of 500, and so on. AddDataSource
operation.AddTags
operation. Specifies the tags to attach to the domain.AddTags
operation. Specifies the tags to attach to the domain, data source, or application.GetDataSource
operation.GetDataSource
operation.ListDataSources
operation.ListDirectQueryDataSources
API operation. ListTags
operation.RemoveTags
operation.UpdateDataSource
operation.<message-template-id>:<qualifier>
, which is either an actual version number or an Amazon Q Connect managed qualifier $ACTIVE_VERSION
| $LATEST
. If it is not supplied, then $LATEST
is assumed implicitly.MANUAL_SEARCH
.CLAUDE_3_HAIKU_20240307_V1
.SYSTEM
for a default AI Prompt created by Q in Connect or CUSTOMER
for an AI Prompt created by calling AI Prompt creation APIs. CLAUDE_3_HAIKU_20240307_V1
.SYSTEM
for a default AI Prompt created by Q in Connect or CUSTOMER
for an AI Prompt created by calling AI Prompt creation APIs. CLAUDE_3_HAIKU_20240307_V1
.SYSTEM
for a default AI Prompt created by Q in Connect or CUSTOMER
for an AI Prompt created by calling AI Prompt creation APIs. CLAUDE_3_HAIKU_20240307_V1
.SYSTEM
for a default AI Prompt created by Q in Connect or CUSTOMER
for an AI Prompt created by calling AI Prompt creation APIs. ANSWER_RECOMMENDATION
AI Agent.ANSWER_RECOMMENDATION
AI Agent.CLAUDE_3_HAIKU_20240307_V1
CLAUDE_3_HAIKU_20240307_V1
aiGuardrailId
on the request).
"
+ }
+ },
+ "documentation":"
SYSTEM
for a default AI Agent created by Q in Connect or CUSTOMER
for an AI Agent created by calling AI Agent creation APIs. SYSTEM
for a default AI Agent created by Q in Connect or CUSTOMER
for an AI Agent created by calling AI Agent creation APIs. SYSTEM
for a default AI Agent created by Q in Connect or CUSTOMER
for an AI Agent created by calling AI Agent creation APIs. SYSTEM
for a default AI Agent created by Q in Connect or CUSTOMER
for an AI Agent created by calling AI Agent creation APIs. QAppsConfiguration
is enabled in the request. For more information, see Using service-linked roles for Q Apps.QAppsConfiguration
is enabled in the request. For more information, see Using service-linked roles for Q Apps.
"
+ }
+ },
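The UpdateUser CLI example preserved above maps directly onto the Java client. A minimal sketch of the same restricted (logical home directory) configuration; the server ID and target path are placeholders:

import software.amazon.awssdk.services.transfer.TransferClient;
import software.amazon.awssdk.services.transfer.model.HomeDirectoryMapEntry;
import software.amazon.awssdk.services.transfer.model.HomeDirectoryType;
import software.amazon.awssdk.services.transfer.model.UpdateUserRequest;

public class RestrictedUserSketch {
    public static void main(String[] args) {
        try (TransferClient transfer = TransferClient.create()) {
            // Map the user's root ("/") onto their actual home directory so the
            // console shows the Restricted flag, as in the CLI example above.
            transfer.updateUser(UpdateUserRequest.builder()
                    .serverId("s-1234567890abcdef0")   // placeholder server ID
                    .userName("admin-user")
                    .homeDirectoryType(HomeDirectoryType.LOGICAL)
                    .homeDirectoryMappings(HomeDirectoryMapEntry.builder()
                            .entry("/")
                            .target("/test/admin-user")
                            .build())
                    .build());
        }
    }
}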
+ "documentation":"CHROME
— Enables the extension for Chromium-based browsers (Google Chrome, Microsoft Edge, Opera, etc.).FIREFOX
— Enables the extension for Mozilla Firefox.CHROME
and FIREFOX
— Enable the extension for Chromium-based browsers and Mozilla Firefox.
RETRIEVAL_MODE
- The default chat mode for an Amazon Q Business application. When this mode is enabled, Amazon Q Business generates responses only from data sources connected to an Amazon Q Business application.CREATOR_MODE
- By selecting this mode, users can choose to generate responses only from the LLM knowledge, without consulting connected data sources, for a chat request.PLUGIN_MODE
- By selecting this mode, users can choose to use plugins in chat.chatMode
parameter determines the chat modes available to Amazon Q Business users:
RETRIEVAL_MODE
- If you choose this mode, Amazon Q generates responses solely from the data sources connected and indexed by the application. If an answer is not found in the data sources or there are no data sources available, Amazon Q will respond with a \"No Answer Found\" message, unless LLM knowledge has been enabled. In that case, Amazon Q will generate a response from the LLM knowledgeCREATOR_MODE
- By selecting this mode, you can choose to generate responses only from the LLM knowledge. You can also attach files and have Amazon Q generate a response based on the data in those files. If the attached files do not contain an answer for the query, Amazon Q will automatically fall back to generating a response from the LLM knowledge.PLUGIN_MODE
- By selecting this mode, users can choose to use plugins in chat to get their responses.CreateDataSource
API with the same client token will create only one data source connector. Status
field value is FAILED
, the ErrorMessage
field contains a description of the error that caused the data source connector to fail.ACTIVE
, the index is ready for use. If the Status
field value is FAILED
, the ErrorMessage
field contains a message that explains why.sourceAttributions
returned by the Chat
, ChatSync
, and ListMessages
API responses.Status
field value is FAILED
, the ErrorMessage
field contains a description of the error that caused the data source connector to fail.PreExtractionHookConfiguration
parameter if you want to apply advanced alterations on the original or raw documents.PostExtractionHookConfiguration
.maxResults
, Amazon Q Business returns a next token as a pagination token to retrieve the next set of attachments.maxResults
, Amazon Q Business returns a next token as a pagination token to retrieve the next set of messages.maxResults
, Amazon Q Business returns a next token as a pagination token to retrieve the next set of messages.
"
+ },
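The chat modes above can be exercised from the AWS SDK for Java 2.x qbusiness client. The following is a minimal, hedged sketch and is not taken from this patch: the application ID and user message are placeholders, and it assumes the ChatSync operation and ChatMode enum as published in the qbusiness module.

import software.amazon.awssdk.services.qbusiness.QBusinessClient;
import software.amazon.awssdk.services.qbusiness.model.ChatMode;
import software.amazon.awssdk.services.qbusiness.model.ChatSyncRequest;
import software.amazon.awssdk.services.qbusiness.model.ChatSyncResponse;

public class ChatModeExample {
    public static void main(String[] args) {
        try (QBusinessClient qbusiness = QBusinessClient.create()) {
            ChatSyncResponse response = qbusiness.chatSync(ChatSyncRequest.builder()
                    .applicationId("app-id-placeholder")            // placeholder application ID
                    .userMessage("Summarize our vacation policy.")   // placeholder user message
                    // CREATOR_MODE answers from LLM knowledge only; RETRIEVAL_MODE (the default)
                    // answers only from connected data sources; PLUGIN_MODE enables plugins in chat.
                    .chatMode(ChatMode.CREATOR_MODE)
                    .build());
            System.out.println(response.systemMessage());
        }
    }
}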
+ "browserExtensionConfiguration":{
+ "shape":"BrowserExtensionConfiguration",
+ "documentation":" The <i>domain origin</i> refers to the <i>base URL</i> for accessing a website including the protocol (<code>http/https</code>), the domain name, and the port number (if specified).</p> <note> <ul> <li> <p>Any values except <code>null</code> submitted as part of this update will replace all previous values.</p> </li> <li> <p>You must only submit a <i>base URL</i> and not a full path. For example, <code>https://docs.aws.amazon.com</code>.</p> </li> </ul> </note>
",
+ "documentation":"s3:ListCallerAccessGrants
permission to use this operation. GrantScope
, using a fragment of the data's S3 path, and S3 Access Grants will return only the grants with a path that contains the path fragment. You can also pass the AllowedByApplication
filter in the request, which returns only the grants authorized for applications, whether the application is the caller's Identity Center application or any other application (ALL
). For more information, see List the caller's access grants in the Amazon S3 User Guide.
",
"endpoint":{
"hostPrefix":"{AccountId}."
},
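A hedged sketch of the ListCallerAccessGrants call described above, using the AWS SDK for Java 2.x s3control client. The account ID and grant-scope prefix are placeholders, and the response member name is assumed to follow the model's CallerAccessGrantsList naming.

import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.ListCallerAccessGrantsRequest;
import software.amazon.awssdk.services.s3control.model.ListCallerAccessGrantsResponse;

public class ListCallerAccessGrantsExample {
    public static void main(String[] args) {
        try (S3ControlClient s3Control = S3ControlClient.create()) {
            // Requires the s3:ListCallerAccessGrants permission.
            ListCallerAccessGrantsResponse response = s3Control.listCallerAccessGrants(
                    ListCallerAccessGrantsRequest.builder()
                            .accountId("111122223333")                              // placeholder account ID
                            .grantScope("s3://amzn-s3-demo-bucket/projects/")       // return grants under this path fragment
                            .build());
            response.callerAccessGrantsList().forEach(grant ->
                    System.out.println(grant.grantScope() + " -> " + grant.permission()));
        }
    }
}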
@@ -2544,7 +2544,7 @@
},
"Operation":{
"shape":"JobOperation",
- "documentation":"s3:ListCallerAccessGrants
permission to use this operation.
"
+ "documentation":"destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.destinationBucket
in the Availability Zone; identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
.
"
},
"CannedAccessControlList":{
"shape":"S3CannedAccessControlList",
@@ -7453,7 +7453,7 @@
},
"SSEAwsKmsKeyId":{
"shape":"KmsKeyArnString",
- "documentation":" destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.destinationBucket
in the Availability Zone identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
. A directory bucket as a destination bucket can be in Availability Zone or Local Zone. 400 Bad Request
error with the error code InvalidRequest
.SSEAlgorithm
with KMS
, you must specify the SSEAwsKmsKeyId
parameter with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. The key alias format of the KMS key isn't supported. To encrypt new object copies in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3
) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS as the bucket default encryption, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you specify server-side encryption settings for new object copies with SSE-KMS, you must make sure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration. true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.AES256
) is supported.AES256
) and server-side encryption with KMS keys (SSE-KMS) (KMS
). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For the Copy operation in Batch Operations, see S3CopyObjectOperation.ARRAY
, BIGINT
, BOOLEAN
, CHAR
, DATE
, DECIMAL
, DOUBLE
, DOUBLE PRECISION
, FLOAT
, FLOAT4
, INT
, INTEGER
, MAP
, NUMERIC
, NUMBER
, REAL
, SMALLINT
, STRING
, TIMESTAMP
, TIMESTAMP_LTZ
, TIMESTAMP_NTZ
, DATETIME
, TINYINT
, VARCHAR
, TEXT
, CHARACTER
.chunkingConfiguration
after you create the data source connector.dataSourceConfiguration
field.chunkingConfiguration
after you create the data source connector.DRAFT
version) of a prompt or a version of it, depending on whether you include the promptVersion
field or not. For more information, see View information about prompts using Prompt management and View information about a version of your prompt in the Amazon Bedrock User Guide.dataSourceType
specified in the content for each document must match the type of the data source that you specify in the header. For more information, see Ingest documents into a knowledge base in real-time in the Amazon Bedrock User Guide.
"
+ }
+ },
+ "documentation":"sourceType
and include the field that corresponds to it.dataSourceType
and include the field that corresponds to it.type
and include the field that corresponds to it.promptType
. For more information, see Inference parameters for foundation models.type
and include the field that corresponds to it.
"
+ },
+ "statusReason":{
+ "shape":"String",
+ "documentation":"IGNORED
.nextToken
field when making another request to return the next batch of results.maxResults
value provided in the request, enter the token returned in the nextToken
field in the response in this field to return the next batch of results.maxResults
value provided in the request, use this token when making another request in the nextToken
field to return the next batch of results.type
and include the field that corresponds to it.
"
}
}
},
@@ -2649,6 +2728,18 @@
}
}
},
+ "DeleteVoiceConnectorExternalSystemsConfigurationRequest":{
+ "type":"structure",
+ "required":["VoiceConnectorId"],
+ "members":{
+ "VoiceConnectorId":{
+ "shape":"NonEmptyString",
+ "documentation":"CONNECT_CALL_TRANSFER_CONNECTOR
- Enables enterprises to integrate Amazon Connect with other voice systems to directly transfer voice calls and metadata without using the public telephone network. They can use Amazon Connect telephony and Interactive Voice Response (IVR) with their existing voice systems to modernize the IVR experience of their existing contact center and their enterprise and branch voice systems. Additionally, enterprises migrating their contact center to Amazon Connect can start with Connect telephony and IVR for immediate modernization ahead of agent migration.CONNECT_ANALYTICS_CONNECTOR
- Enables enterprises to integrate Amazon Connect with other voice systems for real-time and post-call analytics. They can use Amazon Connect Contact Lens with their existing voice systems to provides call recordings, conversational analytics (including contact transcript, sensitive data redaction, content categorization, theme detection, sentiment analysis, real-time alerts, and post-contact summary), and agent performance evaluations (including evaluation forms, automated evaluation, supervisor review) with a rich user experience to display, search and filter customer interactions, and programmatic access to data streams and the data lake. Additionally, enterprises migrating their contact center to Amazon Connect can start with Contact Lens analytics and performance insights ahead of agent migration.UpdateListener
.UpdateListener
.GROUP
). This is used to associate a child resource configuration with a group resource configuration.
"
+ }
+ }
+ },
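A hedged sketch of calling the operation whose request shape is added above. It assumes the chimesdkvoice client generated from this model follows the usual SDK for Java 2.x naming; the Voice Connector ID is a placeholder.

import software.amazon.awssdk.services.chimesdkvoice.ChimeSdkVoiceClient;
import software.amazon.awssdk.services.chimesdkvoice.model.DeleteVoiceConnectorExternalSystemsConfigurationRequest;

public class DeleteExternalSystemsConfigurationExample {
    public static void main(String[] args) {
        try (ChimeSdkVoiceClient chime = ChimeSdkVoiceClient.create()) {
            // Removes the external systems configuration from the given Voice Connector.
            chime.deleteVoiceConnectorExternalSystemsConfiguration(
                    DeleteVoiceConnectorExternalSystemsConfigurationRequest.builder()
                            .voiceConnectorId("abcdef1ghij2klmno3pqr4") // placeholder Voice Connector ID
                            .build());
        }
    }
}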
+ "CreateResourceConfigurationResponse":{
+ "type":"structure",
+ "members":{
+ "allowAssociationToShareableServiceNetwork":{
+ "shape":"Boolean",
+ "documentation":"SINGLE
- A single resource.GROUP
- A group of resources. You must create a group resource configuration before you create a child resource configuration.CHILD
- A single resource that is part of a group resource configuration.ARN
- An Amazon Web Services resource.
"
+ }
+ }
+ },
+ "GetResourceGatewayRequest":{
+ "type":"structure",
+ "required":["resourceGatewayIdentifier"],
+ "members":{
+ "resourceGatewayIdentifier":{
+ "shape":"ResourceGatewayIdentifier",
+ "documentation":"SINGLE
- A single resource.GROUP
- A group of resources.CHILD
- A single resource that is part of a group resource configuration.ARN
- An Amazon Web Services resource.
"
+ }
+ },
+ "documentation":"SINGLE
- A single resource.GROUP
- A group of resources.CHILD
- A single resource that is part of a group resource configuration.ARN
- An Amazon Web Services resource.
"
+ }
+ }
+ },
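A hedged sketch of fetching one of the resource gateways referenced by the SINGLE, GROUP, CHILD, and ARN resource configuration types described above. It assumes the vpclattice client generated from this model follows the usual SDK for Java 2.x naming; the identifier is a placeholder.

import software.amazon.awssdk.services.vpclattice.VpcLatticeClient;
import software.amazon.awssdk.services.vpclattice.model.GetResourceGatewayRequest;
import software.amazon.awssdk.services.vpclattice.model.GetResourceGatewayResponse;

public class GetResourceGatewayExample {
    public static void main(String[] args) {
        try (VpcLatticeClient lattice = VpcLatticeClient.create()) {
            GetResourceGatewayResponse gateway = lattice.getResourceGateway(
                    GetResourceGatewayRequest.builder()
                            .resourceGatewayIdentifier("rgw-0123456789abcdef0") // placeholder ID or ARN
                            .build());
            System.out.println(gateway); // print the full response rather than guessing member names
        }
    }
}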
+ "UpdateResourceGatewayRequest":{
+ "type":"structure",
+ "required":["resourceGatewayIdentifier"],
+ "members":{
+ "resourceGatewayIdentifier":{
+ "shape":"ResourceGatewayIdentifier",
+ "documentation":"SINGLE
- A single resource.GROUP
- A group of resources.CHILD
- A single resource that is part of a group resource configuration.ARN
- An Amazon Web Services resource.FAILED
, COMPLETED
, and STOPPED
. You can request up to 25 model evaluation jobs be deleted in a single request.FAILED
, COMPLETED
, and STOPPED
. You can request up to 25 model evaluation jobs be deleted in a single request.EvaluatorModelConfig
is required for evaluation jobs that use a knowledge base or in model evaluation job that use a model as judge. This model computes all evaluation related metrics.EvaluationDatasetMetricConfig
object is used to specify the prompt datasets, task type, and metric names.EvaluationDatasetMetricConfig
object is used to specify the prompt datasets, task type, and metric names.EvaluationConfig
object. To pass this role to Amazon Bedrock, the caller of this API must have the iam:PassRole
permission. To learn more about the required permissions, see Required permissions.inferenceParams
. To learn more about supported inference parameters for Amazon Bedrock models, see Inference parameters for foundation models.inferenceParams
are specified using JSON. To successfully insert JSON as string make sure that all quotations are properly escaped. For example, \"temperature\":\"0.25\"
key value pair would need to be formatted as \\\"temperature\\\":\\\"0.25\\\"
to successfully accepted in the request.inferenceParams
. To learn more about supported inference parameters for Amazon Bedrock models, see Inference parameters for foundation models.inferenceParams
are specified using JSON. To successfully insert JSON as string make sure that all quotations are properly escaped. For example, \"temperature\":\"0.25\"
key value pair would need to be formatted as \\\"temperature\\\":\\\"0.25\\\"
to successfully accepted in the request.AutomatedEvaluationConfig
to view the required parameters.HumanEvaluationConfig
to view the required parameters.AutomatedEvaluationConfig
or HumanEvaluationConfig
object.\"Builtin.Accuracy\"
, \"Builtin.Robustness\"
, and \"Builtin.Toxicity\"
. In human-based model evaluation jobs the array of strings must match the name
parameter specified in HumanEvaluationCustomMetric
. Builtin.ContextRelevance
\", \"Builtin.ContextConverage
\".Builtin.Correctness
\", \"Builtin.Completeness
\", \"Builtin.Helpfulness
\", \"Builtin.LogicalCoherence
\", \"Builtin.Faithfulness
\", \"Builtin.Harmfulness
\", \"Builtin.Stereotyping
\", \"Builtin.Refusal
\".Builtin.Accuracy
\", \"Builtin.Robustness
\", and \"Builtin.Toxicity
\". In model evaluation jobs that use a LLM as judge you can specify \"Builtin.Correctness
\", \"Builtin.Completeness\"
, \"Builtin.Faithfulness\"
, \"Builtin.Helpfulness
\", \"Builtin.Coherence
\", \"Builtin.Relevance
\", \"Builtin.FollowingInstructions
\", \"Builtin.ProfessionalStyleAndTone
\", You can also specify the following responsible AI related metrics only for model evaluation job that use a LLM as judge \"Builtin.Harmfulness
\", \"Builtin.Stereotyping
\", and \"Builtin.Refusal
\".name
parameter specified in HumanEvaluationCustomMetric
.EvaluatorModelConfig
is required for evaluation jobs that use a knowledge base or in model evaluation job that use a model as judge. This model computes all evaluation related metrics.RetrieveAndGenerate
to generate responses while using an external source.retrieveAndGenerate
function.retrieveAndGenerate
function.textInferenceConfig
structure for a knowledge base. This allows you to provide custom model parameters specific to the language model being used.RetrieveAndGenerate
function.HYBRID
search using both vector embeddings and raw text, or SEMANTIC
search using only vector embeddings. For other vector store configurations, only SEMANTIC
search is available.\"equals\": { \"key\": \"animal\", \"value\": \"cat\" }
\"notEquals\": { \"key\": \"animal\", \"value\": \"cat\" }
\"greaterThan\": { \"key\": \"year\", \"value\": 1989 }
\"greaterThanOrEquals\": { \"key\": \"year\", \"value\": 1989 }
\"lessThan\": { \"key\": \"year\", \"value\": 1989 }
\"lessThanOrEquals\": { \"key\": \"year\", \"value\": 1989 }
\"in\": { \"key\": \"animal\", \"value\": [\"cat\", \"dog\"] }
\"notIn\": { \"key\": \"animal\", \"value\": [\"cat\", \"dog\"] }
\"startsWith\": { \"key\": \"animal\", \"value\": \"ca\" }
[\"dog\", \"cat\"]
): \"listContains\": { \"key\": \"animals\", \"value\": \"cat\" }
\"stringContains\": { \"key\": \"animal\", \"value\": \"at\" }
[\"dog\", \"cat\"]
): \"stringContains\": { \"key\": \"animals\", \"value\": \"at\" }
EXTERNAL_SOURCES
, then currently only Claude 3 Sonnet models for knowledge bases are supported.retrieveAndGenerate
function.RetrieveAndGenerate
function.<repository location url>/repository_name
.StartQueryMonitorTopContributors
. Use the scope ID that was returned for your account by CreateScope
.StartQueryWorkloadInsightsTopContributors
. Use the scope ID that was returned for your account by CreateScope
.StartQueryWorkloadInsightsTopContributorsData
. Use the scope ID that was returned for your account by CreateScope
.SUCCEEDED
before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryMonitorTopContributors
.SUCCEEDED
before you review the results.SUCCEEDED
before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributors
.SUCCEEDED
before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributorsData
.STATUS
parameter.
"
+ },
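A hedged sketch of plugging the built-in metric names listed above into an automated evaluation configuration with the AWS SDK for Java 2.x bedrock module. The task type and built-in dataset name are illustrative assumptions, not values taken from this patch.

import software.amazon.awssdk.services.bedrock.model.EvaluationDataset;
import software.amazon.awssdk.services.bedrock.model.EvaluationDatasetMetricConfig;
import software.amazon.awssdk.services.bedrock.model.EvaluationTaskType;

public class MetricConfigExample {
    public static void main(String[] args) {
        // Prompt dataset, task type, and metric names, as described in the documentation above.
        EvaluationDatasetMetricConfig metricConfig = EvaluationDatasetMetricConfig.builder()
                .taskType(EvaluationTaskType.QUESTION_AND_ANSWER)                 // assumed task type for the sketch
                .dataset(EvaluationDataset.builder().name("Builtin.BoolQ").build()) // assumed built-in dataset
                .metricNames("Builtin.Accuracy", "Builtin.Robustness", "Builtin.Toxicity")
                .build();
        System.out.println(metricConfig);
    }
}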
+ "localResources":{
+ "shape":"MonitorLocalResources",
+ "documentation":"PENDING
: The monitor is in the process of being created.ACTIVE
: The monitor is active.INACTIVE
: The monitor is inactive.ERROR
: Monitor creation failed due to an error.DELETING
: The monitor is in the process of being deleted.SUCCEEDED
, IN_PROGRESS
, or FAILED
.
"
+ },
+ "localResources":{
+ "shape":"MonitorLocalResources",
+ "documentation":"PENDING
: The monitor is in the process of being created.ACTIVE
: The monitor is active.INACTIVE
: The monitor is inactive.ERROR
: Monitor creation failed due to an error.DELETING
: The monitor is in the process of being deleted.SUCCEEDED
before you review the results.
"
+ }
+ }
+ },
+ "GetQueryStatusWorkloadInsightsTopContributorsDataInput":{
+ "type":"structure",
+ "required":[
+ "scopeId",
+ "queryId"
+ ],
+ "members":{
+ "scopeId":{
+ "shape":"ScopeId",
+ "documentation":"QUEUED
: The query is scheduled to run.RUNNING
: The query is in progress but not complete.SUCCEEDED
: The query completed sucessfully.FAILED
: The query failed due to an error.CANCELED
: The query was canceled.CreateScope
API call.
"
+ }
+ }
+ },
+ "GetQueryStatusWorkloadInsightsTopContributorsInput":{
+ "type":"structure",
+ "required":[
+ "scopeId",
+ "queryId"
+ ],
+ "members":{
+ "scopeId":{
+ "shape":"ScopeId",
+ "documentation":"QUEUED
: The query is scheduled to run.RUNNING
: The query is in progress but not complete.SUCCEEDED
: The query completed sucessfully.FAILED
: The query failed due to an error.CANCELED
: The query was canceled.SUCCEEDED
before you review the results.
"
+ }
+ }
+ },
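The query-status documentation above asks callers to wait until a query reaches SUCCEEDED before reading results. The following is a hedged polling sketch that assumes the networkflowmonitor client generated from this model follows the usual SDK for Java 2.x naming, including a status member on the response; the scope and query IDs are placeholders.

import software.amazon.awssdk.services.networkflowmonitor.NetworkFlowMonitorClient;
import software.amazon.awssdk.services.networkflowmonitor.model.GetQueryStatusWorkloadInsightsTopContributorsRequest;

public class QueryStatusPollingExample {
    public static void main(String[] args) throws InterruptedException {
        try (NetworkFlowMonitorClient client = NetworkFlowMonitorClient.create()) {
            String status;
            while (true) {
                status = client.getQueryStatusWorkloadInsightsTopContributors(
                                GetQueryStatusWorkloadInsightsTopContributorsRequest.builder()
                                        .scopeId("scope-id-placeholder")   // scope ID returned by CreateScope
                                        .queryId("query-id-placeholder")   // query ID returned by the StartQuery* call
                                        .build())
                        .statusAsString();
                if (!"QUEUED".equals(status) && !"RUNNING".equals(status)) {
                    break; // SUCCEEDED, FAILED, or CANCELED is terminal
                }
                Thread.sleep(5_000); // QUEUED and RUNNING are not terminal; poll again
            }
            System.out.println("Final query status: " + status);
        }
    }
}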
+ "GetScopeInput":{
+ "type":"structure",
+ "required":["scopeId"],
+ "members":{
+ "scopeId":{
+ "shape":"GetScopeInputScopeIdString",
+ "documentation":"QUEUED
: The query is scheduled to run.RUNNING
: The query is in progress but not complete.SUCCEEDED
: The query completed sucessfully.FAILED
: The query failed due to an error.CANCELED
: The query was canceled.CreateScope
API call.CreateScope
API call.SUCCEEDED
, IN_PROGRESS
, or FAILED
.
",
+ "location":"querystring",
+ "locationName":"monitorStatus"
+ }
+ }
+ },
+ "ListMonitorsOutput":{
+ "type":"structure",
+ "required":["monitors"],
+ "members":{
+ "monitors":{
+ "shape":"MonitorList",
+ "documentation":"PENDING
: The monitor is in the process of being created.ACTIVE
: The monitor is active.INACTIVE
: The monitor is inactive.ERROR
: Monitor creation failed due to an error.DELETING
: The monitor is in the process of being deleted.AWS::EC2::VPC
AWS::AvailabilityZone
or AWS::EC2::Subnet
.AWS::EC2::VPC
AWS::AvailabilityZone
, AWS::EC2::Subnet
, or AWS::AWSService
.
"
+ }
+ },
+ "documentation":"PENDING
: The monitor is in the process of being created.ACTIVE
: The monitor is active.INACTIVE
: The monitor is inactive.ERROR
: Monitor creation failed due to an error.DELETING
: The monitor is in the process of being deleted.
"
+ },
+ "remoteVpcId":{
+ "shape":"VpcId",
+ "documentation":"INTRA_AZ
: Top contributor network flows within a single Availability ZoneINTER_AZ
: Top contributor network flows between Availability ZonesINTER_VPC
: Top contributor network flows between VPCsAWS_SERVICES
: Top contributor network flows to or from Amazon Web Services servicesUNCLASSIFIED
: Top contributor network flows that do not have a bucket classificationSUCCEEDED
, IN_PROGRESS
, or FAILED
.
"
+ },
+ "limit":{
+ "shape":"Limit",
+ "documentation":"INTRA_AZ
: Top contributor network flows within a single Availability ZoneINTER_AZ
: Top contributor network flows between Availability ZonesINTER_VPC
: Top contributor network flows between VPCsAWS_SERVICES
: Top contributor network flows to or from Amazon Web Services servicesUNCLASSIFIED
: Top contributor network flows that do not have a bucket classification
"
+ }
+ }
+ },
+ "StartQueryWorkloadInsightsTopContributorsDataOutput":{
+ "type":"structure",
+ "required":["queryId"],
+ "members":{
+ "queryId":{
+ "shape":"String",
+ "documentation":"INTRA_AZ
: Top contributor network flows within a single Availability ZoneINTER_AZ
: Top contributor network flows between Availability ZonesINTER_VPC
: Top contributor network flows between VPCsAWS_SERVICES
: Top contributor network flows to or from Amazon Web Services servicesUNCLASSIFIED
: Top contributor network flows that do not have a bucket classificationCreateScope
API call.
"
+ },
+ "limit":{
+ "shape":"Limit",
+ "documentation":"INTRA_AZ
: Top contributor network flows within a single Availability ZoneINTER_AZ
: Top contributor network flows between Availability ZonesINTER_VPC
: Top contributor network flows between VPCsAWS_SERVICES
: Top contributor network flows to or from Amazon Web Services servicesUNCLASSIFIED
: Top contributor network flows that do not have a bucket classificationACCOUNT
because a target is currently a single Amazon Web Services account.
"
+ },
+ "localResources":{
+ "shape":"MonitorLocalResources",
+ "documentation":"PENDING
: The monitor is in the process of being created.ACTIVE
: The monitor is active.INACTIVE
: The monitor is inactive.ERROR
: Monitor creation failed due to an error.DELETING
: The monitor is in the process of being deleted.SUCCEEDED
, IN_PROGRESS
, or FAILED
.PutPermission
permits the specified Amazon Web Services account or Amazon Web Services organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account. PutPermission
once for each of these accounts. Or, if all the accounts are members of the same Amazon Web Services organization, you can run PutPermission
once specifying Principal
as \"*\" and specifying the Amazon Web Services organization ID in Condition
, to grant permissions to all accounts in that organization.RoleArn
with proper permissions when they use PutTarget
to add your account's event bus as a target. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.PutPermission
permits the specified Amazon Web Services account or Amazon Web Services organization to put events to the specified event bus. Amazon EventBridge rules in your account are triggered by these events arriving to an event bus in your account. PutPermission
once for each of these accounts. Or, if all the accounts are members of the same Amazon Web Services organization, you can run PutPermission
once specifying Principal
as \"*\" and specifying the Amazon Web Services organization ID in Condition
, to grant permissions to all accounts in that organization.RoleArn
with proper permissions when they use PutTarget
to add your account's event bus as a target. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.PutRule
command. If you omit arguments in PutRule
, the old values for those arguments are not kept. Instead, they are replaced with null values.PutRule
operation and assign tags, you must have both the events:PutRule
and events:TagResource
permissions.PutRule
operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.PutRule
command. If you omit arguments in PutRule
, the old values for those arguments are not kept. Instead, they are replaced with null values.PutRule
operation and assign tags, you must have both the events:PutRule
and events:TagResource
permissions.PutRule
operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.ConnectionOAuthClientResponseParameters
object that contains details about the client parameters returned when OAuth is specified as the authorization type.CreateConnectionBasicAuthRequestParameters
object that contains the Basic authorization parameters to use for the connection.CreateConnectionOAuthRequestParameters
object that contains the OAuth authorization parameters to use for the connection.CreateConnectionApiKeyAuthRequestParameters
object that contains the API key authorization parameters to use for the connection.ConnectionHttpParameters
object that contains the API key authorization parameters to use for the connection. Note that if you include additional parameters for the target of a rule via HttpParameters
, including query strings, the parameters added for the connection take precedence.HttpParameters
, including query strings, the parameters added for the connection take precedence.AuthorizationType
you specify.CreateConnectionOAuthClientRequestParameters
object that contains the client parameters for OAuth authorization.ConnectionHttpParameters
object that contains details about the additional parameters to use for the connection.CreateConnectionAuthRequestParameters
object that contains the authorization parameters to use to authorize with the endpoint. AuthorizationType
you specify.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.ApiDestination
objects that include information about an API destination.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is returned, there are more results available. The value of nextToken
is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is returned, there are more results available. The value of nextToken
is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.nextToken
is a unique pagination token for each page. To retrieve the next page of results, make the call again using the returned token. Keep all other arguments unchanged.HTTP 400 InvalidToken
error.RetryPolicy
object that contains the retry policy configuration to use for the dead-letter queue.UpdateConnectionBasicAuthRequestParameters
object that contains the authorization parameters for Basic authorization.UpdateConnectionOAuthRequestParameters
object that contains the authorization parameters for OAuth authorization.UpdateConnectionApiKeyAuthRequestParameters
object that contains the authorization parameters for API key authorization.ConnectionHttpParameters
object that contains the additional parameters to use for the connection.UpdateConnectionOAuthClientRequestParameters
object that contains the client parameters to use for the connection when OAuth is specified as the authorization type.CreateSnapshot
permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied
exception.Matching
to true.AsOf
date.InvoiceUnitRule
object used to create invoice units. OR
condition (match any) of the specified invoice unit names. OR
condition (match any) of the specified accounts. This filter only matches the specified accounts on the invoice receivers of the invoice units. OR
condition (match any) of the specified accounts. The specified account IDs are matched with either the receiver or the linked accounts in the rules. InvoiceReceivers
, Names
, and Accounts
. AsOf
time is set to before it was deleted. If an AsOf
is not provided, the default value is the current time. InvoiceUnitRule
object used the categorize invoice units. LINKED_ACCOUNT
IDs where charges are included within the invoice unit.
This is used to categorize the invoice unit. Values are Amazon Web Services account IDs. Currently, the only supported rule is LINKED_ACCOUNT
.
An optional input to the list API. If multiple filters are specified, the returned list will be a configuration that match all of the provided filters. Supported filter types are InvoiceReceivers
, Names
, and Accounts
.
The next token used to indicate where the returned list should start from.
" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"The maximum number of invoice units that can be returned.
" + }, + "AsOf":{ + "shape":"AsOfTimestamp", + "documentation":" The state of an invoice unit at a specified time. You can see legacy invoice units that are currently deleted if the AsOf
time is set to before it was deleted. If an AsOf
is not provided, the default value is the current time.
An invoice unit is a set of mutually exclusive accounts that correspond to your business entity.
" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"The next token used to indicate where the returned list should start from.
" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"TagrisArn", + "documentation":"The Amazon Resource Name (ARN) of tags to list.
" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "ResourceTags":{ + "shape":"ResourceTagList", + "documentation":"Adds a tag to a resource.
" + } + } + }, + "MaxResultsInteger":{ + "type":"integer", + "box":true, + "max":500, + "min":1 + }, + "NextTokenString":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\S\\s]*" + }, + "ProfileList":{ + "type":"list", + "member":{"shape":"InvoiceProfile"} + }, + "ReceiverAddress":{ + "type":"structure", + "members":{ + "AddressLine1":{ + "shape":"BasicString", + "documentation":"The first line of the address.
" + }, + "AddressLine2":{ + "shape":"BasicString", + "documentation":"The second line of the address, if applicable.
" + }, + "AddressLine3":{ + "shape":"BasicString", + "documentation":"The third line of the address, if applicable.
" + }, + "DistrictOrCounty":{ + "shape":"BasicString", + "documentation":"The district or country the address is located in.
" + }, + "City":{ + "shape":"BasicString", + "documentation":"The city that the address is in.
" + }, + "StateOrRegion":{ + "shape":"BasicString", + "documentation":"The state, region, or province the address is located.
" + }, + "CountryCode":{ + "shape":"BasicString", + "documentation":"The country code for the country the address is in.
" + }, + "CompanyName":{ + "shape":"BasicString", + "documentation":"A unique company name.
" + }, + "PostalCode":{ + "shape":"BasicString", + "documentation":"The postal code associated with the address.
" + } + }, + "documentation":"The details of the address associated with the receiver.
", + "sensitive":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"BasicString"}, + "resourceName":{ + "shape":"InvoiceUnitArnString", + "documentation":"The resource could not be found.
" + } + }, + "documentation":"The resource could not be found.
", + "exception":true + }, + "ResourceTag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"ResourceTagKey", + "documentation":"The object key of your of your resource tag.
" + }, + "Value":{ + "shape":"ResourceTagValue", + "documentation":"The specific value of the resource tag.
" + } + }, + "documentation":"The tag structure that contains a tag key and value.
" + }, + "ResourceTagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceTagKeyList":{ + "type":"list", + "member":{"shape":"ResourceTagKey"}, + "max":200, + "min":0 + }, + "ResourceTagList":{ + "type":"list", + "member":{"shape":"ResourceTag"}, + "max":200, + "min":0 + }, + "ResourceTagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "SensitiveBasicString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"\\S+", + "sensitive":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"BasicString"} + }, + "documentation":"The request was rejected because it attempted to create resources beyond the current Amazon Web Services account limits. The error message describes the limit exceeded.
", + "exception":true + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "ResourceTags" + ], + "members":{ + "ResourceArn":{ + "shape":"TagrisArn", + "documentation":"The Amazon Resource Name (ARN) of the tags.
" + }, + "ResourceTags":{ + "shape":"ResourceTagList", + "documentation":"Adds a tag to a resource.
" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagrisArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws[-a-z0-9]*:[a-z0-9]+:[-a-z0-9]*:[0-9]{12}:[-a-zA-Z0-9/:_]+" + }, + "TaxInheritanceDisabledFlag":{"type":"boolean"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"BasicString"} + }, + "documentation":"The request was denied due to request throttling.
", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "ResourceTagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"TagrisArn", + "documentation":"The Amazon Resource Name (ARN) to untag.
" + }, + "ResourceTagKeys":{ + "shape":"ResourceTagKeyList", + "documentation":"Keys for the tags to be removed.
" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateInvoiceUnitRequest":{ + "type":"structure", + "required":["InvoiceUnitArn"], + "members":{ + "InvoiceUnitArn":{ + "shape":"InvoiceUnitArnString", + "documentation":"The ARN to identify an invoice unit. This information can't be modified or deleted.
" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"The assigned description for an invoice unit. This information can't be modified or deleted.
" + }, + "TaxInheritanceDisabled":{ + "shape":"TaxInheritanceDisabledFlag", + "documentation":"Whether the invoice unit based tax inheritance is/ should be enabled or disabled.
", + "box":true + }, + "Rule":{ + "shape":"InvoiceUnitRule", + "documentation":"The InvoiceUnitRule
object used to update invoice units.
The ARN to identify an invoice unit. This information can't be modified or deleted.
" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"BasicString"}, + "resourceName":{ + "shape":"InvoiceUnitArnString", + "documentation":"You don't have sufficient access to perform this action.
" + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"You don't have sufficient access to perform this action.
" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"The input fails to satisfy the constraints specified by an Amazon Web Services service.
" + } + }, + "documentation":"The input fails to satisfy the constraints specified by an Amazon Web Services service.
", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"BasicString", + "documentation":"The input fails to satisfy the constraints specified by an Amazon Web Services service.
" + }, + "message":{ + "shape":"BasicString", + "documentation":"The input fails to satisfy the constraints specified by an Amazon Web Services service.
" + } + }, + "documentation":"The input fails to satisfy the constraints specified by an Amazon Web Services service.
" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "nonMemberPresent", + "maxAccountsExceeded", + "maxInvoiceUnitsExceeded", + "duplicateInvoiceUnit", + "mutualExclusionError", + "accountMembershipError", + "taxSettingsError", + "expiredNextToken", + "invalidNextToken", + "invalidInput", + "fieldValidationFailed", + "cannotParse", + "unknownOperation", + "other" + ] + } + }, + "documentation":"Amazon Web Services Invoice Configuration
You can use Amazon Web Services Invoice Configuration APIs to programmatically create, update, delete, get, and list invoice units. You can also programmatically fetch the information of the invoice receiver. For example, business legal name, address, and invoicing contacts.
You can use Amazon Web Services Invoice Configuration to receive separate Amazon Web Services invoices based your organizational needs. By using Amazon Web Services Invoice Configuration, you can configure invoice units that are groups of Amazon Web Services accounts that represent your business entities, and receive separate invoices for each business entity. You can also assign a unique member or payer account as the invoice receiver for each invoice unit. As you create new accounts within your Organizations using Amazon Web Services Invoice Configuration APIs, you can automate the creation of new invoice units and subsequently automate the addition of new accounts to your invoice units.
Service endpoint
You can use the following endpoints for Amazon Web Services Invoice Configuration:
https://invoicing.us-east-1.api.aws
Principal ID of the user.
", + "locationName":"principalId" + }, + "UserName":{ + "shape":"String", + "documentation":"Name of the user.
", + "locationName":"userName" + }, + "UserType":{ + "shape":"String", + "documentation":"Type of the user.
", + "locationName":"userType" + } + }, + "documentation":"Contains information about the access keys.
" + }, "AccessKeyDetails":{ "type":"structure", "members":{ @@ -1268,6 +1289,23 @@ }, "documentation":"Contains information about the access keys.
" }, + "Account":{ + "type":"structure", + "required":["Uid"], + "members":{ + "Uid":{ + "shape":"String", + "documentation":"ID of the member's Amazon Web Services account
", + "locationName":"uid" + }, + "Name":{ + "shape":"String", + "documentation":"Name of the member's Amazon Web Services account.
", + "locationName":"account" + } + }, + "documentation":"Contains information about the account.
" + }, "AccountDetail":{ "type":"structure", "required":[ @@ -1420,6 +1458,38 @@ }, "documentation":"Contains information about actions.
" }, + "Actor":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"String", + "documentation":"ID of the threat actor.
", + "locationName":"id" + }, + "User":{ + "shape":"User", + "documentation":"Contains information about the user credentials used by the threat actor.
", + "locationName":"user" + }, + "Session":{ + "shape":"Session", + "documentation":"Contains information about the user session where the activity initiated.
", + "locationName":"session" + } + }, + "documentation":"Information about the actors involved in an attack sequence.
" + }, + "ActorIds":{ + "type":"list", + "member":{"shape":"String"}, + "max":400 + }, + "Actors":{ + "type":"list", + "member":{"shape":"Actor"}, + "max":400 + }, "AddonDetails":{ "type":"structure", "members":{ @@ -1609,6 +1679,26 @@ "NONE" ] }, + "AutonomousSystem":{ + "type":"structure", + "required":[ + "Name", + "Number" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"Name associated with the Autonomous System (AS).
", + "locationName":"name" + }, + "Number":{ + "shape":"Integer", + "documentation":"The unique number that identifies the Autonomous System (AS).
", + "locationName":"number" + } + }, + "documentation":"Contains information about the Autonomous System (AS) associated with the network endpoints involved in an attack sequence.
" + }, "AwsApiCallAction":{ "type":"structure", "members":{ @@ -3287,6 +3377,11 @@ "shape":"Anomaly", "documentation":"The details about the anomalous activity that caused GuardDuty to generate the finding.
", "locationName":"anomaly" + }, + "Sequence":{ + "shape":"Sequence", + "documentation":"The details about the attack sequence.
", + "locationName":"sequence" } }, "documentation":"Contains information about the detected behavior.
" @@ -3641,6 +3736,93 @@ }, "documentation":"Describes the configuration of scanning EBS volumes as a data source.
" }, + "Ec2Instance":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "documentation":"The availability zone of the Amazon EC2 instance. For more information, see Availability zones in the Amazon EC2 User Guide.
", + "locationName":"availabilityZone" + }, + "ImageDescription":{ + "shape":"String", + "documentation":"The image description of the Amazon EC2 instance.
", + "locationName":"imageDescription" + }, + "InstanceState":{ + "shape":"String", + "documentation":"The state of the Amazon EC2 instance. For more information, see Amazon EC2 instance state changes in the Amazon EC2 User Guide.
", + "locationName":"instanceState" + }, + "IamInstanceProfile":{"shape":"IamInstanceProfile"}, + "InstanceType":{ + "shape":"String", + "documentation":"Type of the Amazon EC2 instance.
", + "locationName":"instanceType" + }, + "OutpostArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the Amazon Web Services Outpost. This shows applicable Amazon Web Services Outposts instances.
", + "locationName":"outpostArn" + }, + "Platform":{ + "shape":"String", + "documentation":"The platform of the Amazon EC2 instance.
", + "locationName":"platform" + }, + "ProductCodes":{ + "shape":"ProductCodes", + "documentation":"The product code of the Amazon EC2 instance.
", + "locationName":"productCodes" + }, + "Ec2NetworkInterfaceUids":{ + "shape":"Ec2NetworkInterfaceUids", + "documentation":"The ID of the network interface.
", + "locationName":"ec2NetworkInterfaceUids" + } + }, + "documentation":"Details about the potentially impacted Amazon EC2 instance resource.
" + }, + "Ec2NetworkInterface":{ + "type":"structure", + "members":{ + "Ipv6Addresses":{ + "shape":"Ipv6Addresses", + "documentation":"A list of IPv6 addresses for the Amazon EC2 instance.
", + "locationName":"ipv6Addresses" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddresses", + "documentation":"Other private IP address information of the Amazon EC2 instance.
", + "locationName":"privateIpAddresses" + }, + "PublicIp":{ + "shape":"String", + "documentation":"The public IP address of the Amazon EC2 instance.
", + "locationName":"publicIp" + }, + "SecurityGroups":{ + "shape":"SecurityGroups", + "documentation":"The security groups associated with the Amazon EC2 instance.
", + "locationName":"securityGroups" + }, + "SubNetId":{ + "shape":"String", + "documentation":"The subnet ID of the Amazon EC2 instance.
", + "locationName":"subNetId" + }, + "VpcId":{ + "shape":"String", + "documentation":"The VPC ID of the Amazon EC2 instance.
", + "locationName":"vpcId" + } + }, + "documentation":"Contains information about the elastic network interface of the Amazon EC2 instance.
" + }, + "Ec2NetworkInterfaceUids":{ + "type":"list", + "member":{"shape":"String"} + }, "EcsClusterDetails":{ "type":"structure", "members":{ @@ -3806,6 +3988,11 @@ "members":{ } }, + "EndpointIds":{ + "type":"list", + "member":{"shape":"String"}, + "max":400 + }, "Eq":{ "type":"list", "member":{"shape":"String"} @@ -4038,6 +4225,11 @@ "shape":"String", "documentation":"The time and date when the finding was last updated.
", "locationName":"updatedAt" + }, + "AssociatedAttackSequenceArn":{ + "shape":"String", + "documentation":"Amazon Resource Name (ARN) associated with the attack sequence finding.
", + "locationName":"associatedAttackSequenceArn" } }, "documentation":"Contains information about the finding that is generated when abnormal or suspicious activity is detected.
" @@ -4072,6 +4264,16 @@ "SIX_HOURS" ] }, + "FindingResourceType":{ + "type":"string", + "enum":[ + "EC2_INSTANCE", + "EC2_NETWORK_INTERFACE", + "S3_BUCKET", + "S3_OBJECT", + "ACCESS_KEY" + ] + }, "FindingStatisticType":{ "type":"string", "enum":["COUNT_BY_SEVERITY"] @@ -5017,6 +5219,64 @@ }, "documentation":"Contains information about the impersonated user.
" }, + "Indicator":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"IndicatorType", + "documentation":"Specific indicator keys observed in the attack sequence.
", + "locationName":"key" + }, + "Values":{ + "shape":"IndicatorValues", + "documentation":"Values associated with each indicator key. For example, if the indicator key is SUSPICIOUS_NETWORK
, then the value will be the name of the network. If the indicator key is ATTACK_TACTIC
, then the value will be one of the MITRE tactics.
For more information about the values associated with the key, see GuardDuty Extended Threat Detection in the GuardDuty User Guide.
", + "locationName":"values" + }, + "Title":{ + "shape":"IndicatorTitle", + "documentation":"Title describing the indicator.
", + "locationName":"title" + } + }, + "documentation":"Contains information about the indicators that include a set of signals observed in an attack sequence.
" + }, + "IndicatorTitle":{ + "type":"string", + "max":256, + "min":1 + }, + "IndicatorType":{ + "type":"string", + "enum":[ + "SUSPICIOUS_USER_AGENT", + "SUSPICIOUS_NETWORK", + "MALICIOUS_IP", + "TOR_IP", + "ATTACK_TACTIC", + "HIGH_RISK_API", + "ATTACK_TECHNIQUE", + "UNUSUAL_API_FOR_ACCOUNT", + "UNUSUAL_ASN_FOR_ACCOUNT", + "UNUSUAL_ASN_FOR_USER" + ] + }, + "IndicatorValueString":{ + "type":"string", + "max":256, + "min":1 + }, + "IndicatorValues":{ + "type":"list", + "member":{"shape":"IndicatorValueString"}, + "max":400, + "min":1 + }, + "Indicators":{ + "type":"list", + "member":{"shape":"Indicator"}, + "max":400 + }, "InstanceArn":{ "type":"string", "pattern":"^arn:(aws|aws-cn|aws-us-gov):[a-z]+:[a-z]+(-[0-9]+|-[a-z]+)+:([0-9]{12}):[a-z\\-]+\\/[a-zA-Z0-9]*$" @@ -6543,6 +6803,13 @@ "type":"list", "member":{"shape":"String"} }, + "MfaStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Name":{ "type":"string", "max":300, @@ -6552,6 +6819,18 @@ "type":"list", "member":{"shape":"String"} }, + "NetworkConnection":{ + "type":"structure", + "required":["Direction"], + "members":{ + "Direction":{ + "shape":"NetworkDirection", + "documentation":"The direction in which the network traffic is flowing.
", + "locationName":"direction" + } + }, + "documentation":"Contains information about the network connection.
" + }, "NetworkConnectionAction":{ "type":"structure", "members":{ @@ -6598,6 +6877,92 @@ }, "documentation":"Contains information about the NETWORK_CONNECTION action described in the finding.
" }, + "NetworkDirection":{ + "type":"string", + "enum":[ + "INBOUND", + "OUTBOUND" + ] + }, + "NetworkEndpoint":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"String", + "documentation":"The ID of the network endpoint.
", + "locationName":"id" + }, + "Ip":{ + "shape":"String", + "documentation":"The IP address associated with the network endpoint.
", + "locationName":"ip" + }, + "Domain":{ + "shape":"String", + "documentation":"The domain information for the network endpoint.
", + "locationName":"domain" + }, + "Port":{ + "shape":"Integer", + "documentation":"The port number associated with the network endpoint.
", + "locationName":"port" + }, + "Location":{ + "shape":"NetworkGeoLocation", + "documentation":"Information about the location of the network endpoint.
", + "locationName":"location" + }, + "AutonomousSystem":{ + "shape":"AutonomousSystem", + "documentation":"The Autonomous System (AS) of the network endpoint.
", + "locationName":"autonomousSystem" + }, + "Connection":{ + "shape":"NetworkConnection", + "documentation":"Information about the network connection.
", + "locationName":"connection" + } + }, + "documentation":"Contains information about network endpoints that were observed in the attack sequence.
" + }, + "NetworkEndpoints":{ + "type":"list", + "member":{"shape":"NetworkEndpoint"}, + "max":400 + }, + "NetworkGeoLocation":{ + "type":"structure", + "required":[ + "City", + "Country", + "Latitude", + "Longitude" + ], + "members":{ + "City":{ + "shape":"String", + "documentation":"The name of the city.
", + "locationName":"city" + }, + "Country":{ + "shape":"String", + "documentation":"The name of the country.
", + "locationName":"country" + }, + "Latitude":{ + "shape":"Double", + "documentation":"The latitude information of the endpoint location.
", + "locationName":"lat" + }, + "Longitude":{ + "shape":"Double", + "documentation":"The longitude information of the endpoint location.
", + "locationName":"lon" + } + }, + "documentation":"Contains information about network endpoint location.
" + }, "NetworkInterface":{ "type":"structure", "members":{ @@ -7318,6 +7683,53 @@ }, "documentation":"Describes the public access policies that apply to the S3 bucket.
" }, + "PublicAccessConfiguration":{ + "type":"structure", + "members":{ + "PublicAclAccess":{ + "shape":"PublicAccessStatus", + "documentation":"Indicates whether or not there is a setting that allows public access to the Amazon S3 buckets through access control lists (ACLs).
", + "locationName":"publicAclAccess" + }, + "PublicPolicyAccess":{ + "shape":"PublicAccessStatus", + "documentation":"Indicates whether or not there is a setting that allows public access to the Amazon S3 bucket policy.
", + "locationName":"publicPolicyAccess" + }, + "PublicAclIgnoreBehavior":{ + "shape":"PublicAclIgnoreBehavior", + "documentation":"Indicates whether or not there is a setting that ignores all public access control lists (ACLs) on the Amazon S3 bucket and the objects that it contains.
", + "locationName":"publicAclIgnoreBehavior" + }, + "PublicBucketRestrictBehavior":{ + "shape":"PublicBucketRestrictBehavior", + "documentation":"Indicates whether or not there is a setting that restricts access to the bucket with specified policies.
", + "locationName":"publicBucketRestrictBehavior" + } + }, + "documentation":"Describes public access policies that apply to the Amazon S3 bucket.
For information about each of the following settings, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
" + }, + "PublicAccessStatus":{ + "type":"string", + "enum":[ + "BLOCKED", + "ALLOWED" + ] + }, + "PublicAclIgnoreBehavior":{ + "type":"string", + "enum":[ + "IGNORED", + "NOT_IGNORED" + ] + }, + "PublicBucketRestrictBehavior":{ + "type":"string", + "enum":[ + "RESTRICTED", + "NOT_RESTRICTED" + ] + }, "PublishingStatus":{ "type":"string", "enum":[ @@ -7431,7 +7843,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"Information about the tag-key value pair.
", + "documentation":"Information about the tag key-value pair.
", "locationName":"tags" } }, @@ -7593,6 +8005,37 @@ "type":"string", "pattern":"^arn:[A-Za-z-]+:[A-Za-z0-9]+:[A-Za-z0-9-]+:\\d+:(([A-Za-z0-9-]+)[:\\/])?[A-Za-z0-9-]*$" }, + "ResourceData":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"S3Bucket", + "documentation":"Contains information about the Amazon S3 bucket.
", + "locationName":"s3Bucket" + }, + "Ec2Instance":{ + "shape":"Ec2Instance", + "documentation":"Contains information about the Amazon EC2 instance.
", + "locationName":"ec2Instance" + }, + "AccessKey":{ + "shape":"AccessKey", + "documentation":"Contains information about the IAM access key details of a user that involved in the GuardDuty finding.
", + "locationName":"accessKey" + }, + "Ec2NetworkInterface":{ + "shape":"Ec2NetworkInterface", + "documentation":"Contains information about the elastic network interface of the Amazon EC2 instance.
", + "locationName":"ec2NetworkInterface" + }, + "S3Object":{ + "shape":"S3Object", + "documentation":"Contains information about the Amazon S3 object.
", + "locationName":"s3Object" + } + }, + "documentation":"Contains information about the Amazon Web Services resource that is associated with the activity that prompted GuardDuty to generate a finding.
" + }, "ResourceDetails":{ "type":"structure", "members":{ @@ -7665,6 +8108,71 @@ "EC2" ] }, + "ResourceUids":{ + "type":"list", + "member":{"shape":"String"}, + "max":400 + }, + "ResourceV2":{ + "type":"structure", + "required":[ + "Uid", + "ResourceType" + ], + "members":{ + "Uid":{ + "shape":"String", + "documentation":"The unique identifier of the resource.
", + "locationName":"uid" + }, + "Name":{ + "shape":"String", + "documentation":"The name of the resource.
", + "locationName":"name" + }, + "AccountId":{ + "shape":"String", + "documentation":"The Amazon Web Services account ID to which the resource belongs.
", + "locationName":"accountId" + }, + "ResourceType":{ + "shape":"FindingResourceType", + "documentation":"The type of the Amazon Web Services resource.
", + "locationName":"resourceType" + }, + "Region":{ + "shape":"String", + "documentation":"The Amazon Web Services Region where the resource belongs.
", + "locationName":"region" + }, + "Service":{ + "shape":"String", + "documentation":"The Amazon Web Services service of the resource.
", + "locationName":"service" + }, + "CloudPartition":{ + "shape":"String", + "documentation":"The cloud partition within the Amazon Web Services Region to which the resource belongs.
", + "locationName":"cloudPartition" + }, + "Tags":{ + "shape":"Tags", + "documentation":"Contains information about the tags associated with the resource.
", + "locationName":"tags" + }, + "Data":{ + "shape":"ResourceData", + "documentation":"Contains information about the Amazon Web Services resource associated with the activity that prompted GuardDuty to generate a finding.
", + "locationName":"data" + } + }, + "documentation":"Contains information about the Amazon Web Services resource that is associated with the GuardDuty finding.
" + }, + "Resources":{ + "type":"list", + "member":{"shape":"ResourceV2"}, + "max":400 + }, "RuntimeContext":{ "type":"structure", "members":{ @@ -7812,6 +8320,62 @@ }, "documentation":"Information about the process and any required context values for a specific finding.
" }, + "S3Bucket":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "documentation":"The owner ID of the associated S3Amazon S3bucket.
", + "locationName":"ownerId" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp at which the Amazon S3 bucket was created.
", + "locationName":"createdAt" + }, + "EncryptionType":{ + "shape":"String", + "documentation":"The type of encryption used for the Amazon S3 buckets and its objects. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
", + "locationName":"encryptionType" + }, + "EncryptionKeyArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the encryption key that is used to encrypt the Amazon S3 bucket and its objects.
", + "locationName":"encryptionKeyArn" + }, + "EffectivePermission":{ + "shape":"String", + "documentation":"Describes the effective permissions on this S3 bucket, after factoring all the attached policies.
", + "locationName":"effectivePermission" + }, + "PublicReadAccess":{ + "shape":"PublicAccessStatus", + "documentation":"Indicates whether or not the public read access is allowed for an Amazon S3 bucket.
", + "locationName":"publicReadAccess" + }, + "PublicWriteAccess":{ + "shape":"PublicAccessStatus", + "documentation":"Indicates whether or not the public write access is allowed for an Amazon S3 bucket.
", + "locationName":"publicWriteAccess" + }, + "AccountPublicAccess":{ + "shape":"PublicAccessConfiguration", + "documentation":"Contains information about the public access policies that apply to the Amazon S3 bucket at the account level.
", + "locationName":"accountPublicAccess" + }, + "BucketPublicAccess":{ + "shape":"PublicAccessConfiguration", + "documentation":"Contains information about public access policies that apply to the Amazon S3 bucket.
", + "locationName":"bucketPublicAccess" + }, + "S3ObjectUids":{ + "shape":"S3ObjectUids", + "documentation":"Represents a list of Amazon S3 object identifiers.
", + "locationName":"s3ObjectUids" + } + }, + "documentation":"Contains information about the Amazon S3 bucket policies and encryption.
" + }, "S3BucketDetail":{ "type":"structure", "members":{ @@ -7891,6 +8455,27 @@ }, "documentation":"Describes whether S3 data event logs will be enabled as a data source.
" }, + "S3Object":{ + "type":"structure", + "members":{ + "ETag":{ + "shape":"String", + "documentation":"The entity tag is a hash of the Amazon S3 object. The ETag reflects changes only to the contents of an object, and not its metadata.
", + "locationName":"eTag" + }, + "Key":{ + "shape":"String", + "documentation":"The key of the Amazon S3 object.
", + "locationName":"key" + }, + "VersionId":{ + "shape":"String", + "documentation":"The version Id of the Amazon S3 object.
", + "locationName":"versionId" + } + }, + "documentation":"Contains information about the Amazon S3 object.
" + }, "S3ObjectDetail":{ "type":"structure", "members":{ @@ -7926,6 +8511,10 @@ "type":"list", "member":{"shape":"S3ObjectDetail"} }, + "S3ObjectUids":{ + "type":"list", + "member":{"shape":"String"} + }, "Scan":{ "type":"structure", "members":{ @@ -8266,6 +8855,56 @@ "type":"string", "sensitive":true }, + "Sequence":{ + "type":"structure", + "required":[ + "Uid", + "Description", + "Signals" + ], + "members":{ + "Uid":{ + "shape":"String", + "documentation":"Unique identifier of the attack sequence.
", + "locationName":"uid" + }, + "Description":{ + "shape":"SequenceDescription", + "documentation":"Description of the attack sequence.
", + "locationName":"description" + }, + "Actors":{ + "shape":"Actors", + "documentation":"Contains information about the actors involved in the attack sequence.
", + "locationName":"actors" + }, + "Resources":{ + "shape":"Resources", + "documentation":"Contains information about the resources involved in the attack sequence.
", + "locationName":"resources" + }, + "Endpoints":{ + "shape":"NetworkEndpoints", + "documentation":"Contains information about the network endpoints that were used in the attack sequence.
", + "locationName":"endpoints" + }, + "Signals":{ + "shape":"Signals", + "documentation":"Contains information about the signals involved in the attack sequence.
", + "locationName":"signals" + }, + "SequenceIndicators":{ + "shape":"Indicators", + "documentation":"Contains information about the indicators observed in the attack sequence.
", + "locationName":"sequenceIndicators" + } + }, + "documentation":"Contains information about the GuardDuty attack sequence finding.
" + }, + "SequenceDescription":{ + "type":"string", + "max":4096 + }, "Service":{ "type":"structure", "members":{ @@ -8368,6 +9007,32 @@ }, "documentation":"Additional information about the generated finding.
" }, + "Session":{ + "type":"structure", + "members":{ + "Uid":{ + "shape":"String", + "documentation":"The unique identifier of the session.
", + "locationName":"uid" + }, + "MfaStatus":{ + "shape":"MfaStatus", + "documentation":"Indicates whether or not multi-factor authencation (MFA) was used during authentication.
In Amazon Web Services CloudTrail, you can find this value as userIdentity.sessionContext.attributes.mfaAuthenticated
.
The timestamp for when the session was created.
In Amazon Web Services CloudTrail, you can find this value as userIdentity.sessionContext.attributes.creationDate
.
Identifier of the session issuer.
In Amazon Web Services CloudTrail, you can find this value as userIdentity.sessionContext.sessionIssuer.arn
.
Contains information about the authenticated session.
" + }, "SessionNameList":{ "type":"list", "member":{"shape":"String"} @@ -8393,6 +9058,110 @@ }, "documentation":"Information about severity level for each finding type.
" }, + "Signal":{ + "type":"structure", + "required":[ + "Uid", + "Type", + "Name", + "CreatedAt", + "UpdatedAt", + "FirstSeenAt", + "LastSeenAt", + "Count" + ], + "members":{ + "Uid":{ + "shape":"String", + "documentation":"The unique identifier of the signal.
", + "locationName":"uid" + }, + "Type":{ + "shape":"SignalType", + "documentation":"The type of the signal used to identify an attack sequence.
Signals can be GuardDuty findings or activities observed in data sources that GuardDuty monitors. For more information, see Foundational data sources in the GuardDuty User Guide.
A signal type can be one of the valid values listed in this API. Here are the related descriptions:
FINDING
- Individually generated GuardDuty finding.
CLOUD_TRAIL
- Activity observed from CloudTrail logs
S3_DATA_EVENTS
- Activity observed from CloudTrail data events for S3. Activities associated with this type will show up only when you have enabled the GuardDuty S3 Protection feature in your account. For more information about S3 Protection and steps to enable it, see S3 Protection in the GuardDuty User Guide.
", + "locationName":"type" + }, + "Description":{ + "shape":"SignalDescription", + "documentation":"The description of the signal.
", + "locationName":"description" + }, + "Name":{ + "shape":"String", + "documentation":"The name of the signal. For example, when signal type is FINDING
, the signal name is the name of the finding.
", + "locationName":"name" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp when the first finding or activity related to this signal was observed.
", + "locationName":"createdAt" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp when this signal was last observed.
", + "locationName":"updatedAt" + }, + "FirstSeenAt":{ + "shape":"Timestamp", + "documentation":"The timestamp when the first finding or activity related to this signal was observed.
", + "locationName":"firstSeenAt" + }, + "LastSeenAt":{ + "shape":"Timestamp", + "documentation":"The timestamp when the last finding or activity related to this signal was observed.
", + "locationName":"lastSeenAt" + }, + "Severity":{ + "shape":"Double", + "documentation":"The severity associated with the signal. For more information about severity, see Findings severity levels in the GuardDuty User Guide.
", + "locationName":"severity" + }, + "Count":{ + "shape":"Integer", + "documentation":"The number of times this signal was observed.
", + "locationName":"count" + }, + "ResourceUids":{ + "shape":"ResourceUids", + "documentation":"Information about the unique identifiers of the resources involved in the signal.
", + "locationName":"resourceUids" + }, + "ActorIds":{ + "shape":"ActorIds", + "documentation":"Information about the IDs of the threat actors involved in the signal.
", + "locationName":"actorIds" + }, + "EndpointIds":{ + "shape":"EndpointIds", + "documentation":"Information about the endpoint IDs associated with this signal.
", + "locationName":"endpointIds" + }, + "SignalIndicators":{ + "shape":"Indicators", + "documentation":"Contains information about the indicators associated with the signals.
", + "locationName":"signalIndicators" + } + }, + "documentation":"Contains information about the signals involved in the attack sequence.
" + }, + "SignalDescription":{ + "type":"string", + "max":2000 + }, + "SignalType":{ + "type":"string", + "enum":[ + "FINDING", + "CLOUD_TRAIL", + "S3_DATA_EVENTS" + ] + }, + "Signals":{ + "type":"list", + "member":{"shape":"Signal"}, + "max":100, + "min":2 + }, "SortCriteria":{ "type":"structure", "members":{ @@ -9432,6 +10201,42 @@ "type":"list", "member":{"shape":"UsageTopAccountsResult"} }, + "User":{ + "type":"structure", + "required":[ + "Name", + "Uid", + "Type" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"The name of the user.
", + "locationName":"name" + }, + "Uid":{ + "shape":"String", + "documentation":"The unique identifier of the user.
", + "locationName":"uid" + }, + "Type":{ + "shape":"String", + "documentation":"The type of the user.
", + "locationName":"type" + }, + "CredentialUid":{ + "shape":"String", + "documentation":"The credentials of the user ID.
", + "locationName":"credentialUid" + }, + "Account":{ + "shape":"Account", + "documentation":"Contains information about the Amazon Web Services account.
", + "locationName":"account" + } + }, + "documentation":"Contains information about the user involved in the attack sequence.
" + }, "Volume":{ "type":"structure", "members":{ From 0968fc6660822a0619d68c079fb407f6c7cc8613 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:20 +0000 Subject: [PATCH 24/35] Amazon CloudWatch Logs Update: Adds PutIntegration, GetIntegration, ListIntegrations and DeleteIntegration APIs. Adds QueryLanguage support to StartQuery, GetQueryResults, DescribeQueries, DescribeQueryDefinitions, and PutQueryDefinition APIs. --- .../feature-AmazonCloudWatchLogs-dd286dc.json | 6 + .../codegen-resources/service-2.json | 551 +++++++++++++++++- 2 files changed, 552 insertions(+), 5 deletions(-) create mode 100644 .changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json diff --git a/.changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json b/.changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json new file mode 100644 index 00000000000..65a06e6052d --- /dev/null +++ b/.changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "Adds PutIntegration, GetIntegration, ListIntegrations and DeleteIntegration APIs. Adds QueryLanguage support to StartQuery, GetQueryResults, DescribeQueries, DescribeQueryDefinitions, and PutQueryDefinition APIs." +} diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json index 7e7a364a07f..6da54712c8d 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json @@ -257,6 +257,22 @@ ], "documentation":"Deletes a log-group level field index policy that was applied to a single log group. The indexing of the log events that happened before you delete the policy will still be used for as many as 30 days to improve CloudWatch Logs Insights queries.
You can't use this operation to delete an account-level index policy. Instead, use DeleteAccountPolicy.
If you delete a log-group level field index policy and there is an account-level field index policy, in a few minutes the log group begins using that account-wide policy to index new incoming log events.
" }, + "DeleteIntegration":{ + "name":"DeleteIntegration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIntegrationRequest"}, + "output":{"shape":"DeleteIntegrationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"} + ], + "documentation":"Deletes the integration between CloudWatch Logs and OpenSearch Service. If your integration has active vended logs dashboards, you must specify true
for the force
parameter, otherwise the operation will fail. If you delete the integration by setting force
to true
, all your vended logs dashboards powered by OpenSearch Service will be deleted and the data that was on them will no longer be accessible.
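A hedged sketch of calling this operation from the generated client; the client, method, and builder names assume the standard SDK for Java 2.x codegen for the shapes added in this patch, and the integration name is hypothetical.

import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.DeleteIntegrationRequest;

public class DeleteIntegrationExample {
    public static void main(String[] args) {
        try (CloudWatchLogsClient logs = CloudWatchLogsClient.create()) {
            // force(true) deletes the integration even if vended logs dashboards still exist,
            // which also removes those dashboards and the data behind them.
            logs.deleteIntegration(DeleteIntegrationRequest.builder()
                    .integrationName("my-opensearch-integration") // hypothetical name
                    .force(true)
                    .build());
        }
    }
}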
Retrieves complete information about one delivery source.
" }, + "GetIntegration":{ + "name":"GetIntegration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIntegrationRequest"}, + "output":{"shape":"GetIntegrationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns information about one integration between CloudWatch Logs and OpenSearch Service.
" + }, "GetLogAnomalyDetector":{ "name":"GetLogAnomalyDetector", "http":{ @@ -858,6 +889,20 @@ ], "documentation":"Returns a list of anomalies that log anomaly detectors have found. For details about the structure format of each anomaly object that is returned, see the example in this section.
" }, + "ListIntegrations":{ + "name":"ListIntegrations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIntegrationsRequest"}, + "output":{"shape":"ListIntegrationsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"Returns a list of integrations between CloudWatch Logs and other services in this account. Currently, only one integration can be created in an account, and this integration must be with OpenSearch Service.
" + }, "ListLogAnomalyDetectors":{ "name":"ListLogAnomalyDetectors", "http":{ @@ -1052,6 +1097,22 @@ ], "documentation":"Creates or updates a field index policy for the specified log group. Only log groups in the Standard log class support field index policies. For more information about log classes, see Log classes.
You can use field index policies to create field indexes on fields found in log events in the log group. Creating field indexes speeds up and lowers the costs for CloudWatch Logs Insights queries that reference those field indexes, because these queries attempt to skip the processing of log events that are known not to match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user ID, and instance IDs. For more information, see Create field indexes to improve query performance and reduce costs.
To find the fields that are in your log group events, use the GetLogGroupFields operation.
For example, suppose you have created a field index for requestId
. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value
or requestId IN [value, value, ...]
will process fewer log events, which reduces costs and improves performance.
Each index policy has the following quotas and restrictions:
As many as 20 fields can be included in the policy.
Each field name can include as many as 100 characters.
Matches of log events to the names of indexed fields are case-sensitive. For example, a field index of RequestId
won't match a log event containing requestId
.
Log group-level field index policies created with PutIndexPolicy
override account-level field index policies created with PutAccountPolicy. If you use PutIndexPolicy
to create a field index policy for a log group, that log group uses only that policy. The log group ignores any account-wide field index policy that you might have created.
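As an illustration of the index-policy behavior described above, here is a hedged sketch. The request parameter names (logGroupIdentifier, policyDocument) and the policy-document layout are assumptions, since the request shape is not shown in this hunk; the log group and field names are placeholders.

import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutIndexPolicyRequest;

public class PutIndexPolicyExample {
    public static void main(String[] args) {
        try (CloudWatchLogsClient logs = CloudWatchLogsClient.create()) {
            // Index requestId and sessionId so queries that filter on them scan fewer events.
            logs.putIndexPolicy(PutIndexPolicyRequest.builder()
                    .logGroupIdentifier("my-log-group")                        // hypothetical log group
                    .policyDocument("{\"Fields\": [\"requestId\", \"sessionId\"]}") // assumed format
                    .build());
        }
    }
}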
Creates an integration between CloudWatch Logs and another service in this account. Currently, only integrations with OpenSearch Service are supported, and currently you can have only one integration in your account.
Integrating with OpenSearch Service makes it possible for you to create curated vended logs dashboards, powered by OpenSearch Service analytics. For more information, see Vended log dashboards powered by Amazon OpenSearch Service.
You can use this operation only to create a new integration. You can't modify an existing integration.
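A hedged sketch of creating the integration. The builder names follow the ResourceConfig and OpenSearchResourceConfig shapes defined later in this patch, and assume the standard SDK for Java 2.x codegen; the ARNs and the integration name are placeholders.

import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.IntegrationType;
import software.amazon.awssdk.services.cloudwatchlogs.model.OpenSearchResourceConfig;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutIntegrationRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutIntegrationResponse;
import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceConfig;

public class PutIntegrationExample {
    public static void main(String[] args) {
        try (CloudWatchLogsClient logs = CloudWatchLogsClient.create()) {
            PutIntegrationResponse response = logs.putIntegration(PutIntegrationRequest.builder()
                    .integrationName("my-opensearch-integration")                         // hypothetical values
                    .integrationType(IntegrationType.OPENSEARCH)
                    .resourceConfig(ResourceConfig.builder()
                            .openSearchResourceConfig(OpenSearchResourceConfig.builder()
                                    .dataSourceRoleArn("arn:aws:iam::111122223333:role/CWLOpenSearchRole")
                                    .dashboardViewerPrincipals("arn:aws:iam::111122223333:role/DashboardViewer")
                                    .retentionDays(30)
                                    .build())
                            .build())
                    .build());
            // The integration is created asynchronously; expect PROVISIONING at first.
            System.out.println(response.integrationStatus());
        }
    }
}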
" + }, "PutLogEvents":{ "name":"PutLogEvents", "http":{ @@ -1709,6 +1770,11 @@ "min":36, "pattern":"\\S{36,128}" }, + "CollectionRetentionDays":{ + "type":"integer", + "max":30, + "min":1 + }, "Column":{ "type":"string", "max":128, @@ -2006,6 +2072,10 @@ } } }, + "DashboardViewerPrincipals":{ + "type":"list", + "member":{"shape":"Arn"} + }, "DataAlreadyAcceptedException":{ "type":"structure", "members":{ @@ -2160,6 +2230,25 @@ "members":{ } }, + "DeleteIntegrationRequest":{ + "type":"structure", + "required":["integrationName"], + "members":{ + "integrationName":{ + "shape":"IntegrationName", + "documentation":"The name of the integration to delete. To find the name of your integration, use ListIntegrations.
" + }, + "force":{ + "shape":"Force", + "documentation":"Specify true
to force the deletion of the integration even if vended logs dashboards currently exist.
The default is false
.
Limits the number of returned queries to the specified number.
" }, - "nextToken":{"shape":"NextToken"} + "nextToken":{"shape":"NextToken"}, + "queryLanguage":{ + "shape":"QueryLanguage", + "documentation":"Limits the returned queries to only the queries that use the specified query language.
" + } } }, "DescribeQueriesResponse":{ @@ -2878,6 +2971,10 @@ "DescribeQueryDefinitionsRequest":{ "type":"structure", "members":{ + "queryLanguage":{ + "shape":"QueryLanguage", + "documentation":"The query language used for this query. For more information about the query languages that CloudWatch Logs supports, see Supported query languages.
" + }, "queryDefinitionNamePrefix":{ "shape":"QueryDefinitionName", "documentation":"Use this parameter to filter your results to only the query definitions that have names that start with the prefix you specify.
" @@ -3411,6 +3508,7 @@ "last" ] }, + "Force":{"type":"boolean"}, "ForceUpdate":{"type":"boolean"}, "FromKey":{ "type":"string", @@ -3520,6 +3618,37 @@ } } }, + "GetIntegrationRequest":{ + "type":"structure", + "required":["integrationName"], + "members":{ + "integrationName":{ + "shape":"IntegrationName", + "documentation":"The name of the integration that you want to find information about. To find the name of your integration, use ListIntegrations
" + } + } + }, + "GetIntegrationResponse":{ + "type":"structure", + "members":{ + "integrationName":{ + "shape":"IntegrationName", + "documentation":"The name of the integration.
" + }, + "integrationType":{ + "shape":"IntegrationType", + "documentation":"The type of integration. Integrations with OpenSearch Service have the type OPENSEARCH
.
The current status of this integration.
" + }, + "integrationDetails":{ + "shape":"IntegrationDetails", + "documentation":"A structure that contains information about the integration configuration. For an integration with OpenSearch Service, this includes information about OpenSearch Service resources such as the collection, the workspace, and policies.
" + } + } + }, "GetLogAnomalyDetectorRequest":{ "type":"structure", "required":["anomalyDetectorArn"], @@ -3689,6 +3818,10 @@ "GetQueryResultsResponse":{ "type":"structure", "members":{ + "queryLanguage":{ + "shape":"QueryLanguage", + "documentation":"The query language used for this query. For more information about the query languages that CloudWatch Logs supports, see Supported query languages.
" + }, "results":{ "shape":"QueryResults", "documentation":"The log events that matched the query criteria during the most recent time it ran.
The results
value is an array of arrays. Each log event is one object in the top-level array. Each of these log event objects is an array of field
/value
pairs.
This structure contains complete information about one integration between CloudWatch Logs and OpenSearch Service.
" + } + }, + "documentation":"This structure contains information about the integration configuration. For an integration with OpenSearch Service, this includes information about OpenSearch Service resources such as the collection, the workspace, and policies.
This structure is returned by a GetIntegration operation.
", + "union":true + }, + "IntegrationName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "IntegrationNamePrefix":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "IntegrationStatus":{ + "type":"string", + "enum":[ + "PROVISIONING", + "ACTIVE", + "FAILED" + ] + }, + "IntegrationStatusMessage":{ + "type":"string", + "min":1 + }, + "IntegrationSummaries":{ + "type":"list", + "member":{"shape":"IntegrationSummary"} + }, + "IntegrationSummary":{ + "type":"structure", + "members":{ + "integrationName":{ + "shape":"IntegrationName", + "documentation":"The name of this integration.
" + }, + "integrationType":{ + "shape":"IntegrationType", + "documentation":"The type of integration. Integrations with OpenSearch Service have the type OPENSEARCH
.
The current status of this integration.
" + } + }, + "documentation":"This structure contains information about one CloudWatch Logs integration. This structure is returned by a ListIntegrations operation.
" + }, + "IntegrationType":{ + "type":"string", + "enum":["OPENSEARCH"] + }, "Interleaved":{"type":"boolean"}, "InvalidOperationException":{ "type":"structure", @@ -3927,6 +4121,32 @@ "nextToken":{"shape":"NextToken"} } }, + "ListIntegrationsRequest":{ + "type":"structure", + "members":{ + "integrationNamePrefix":{ + "shape":"IntegrationNamePrefix", + "documentation":"To limit the results to integrations that start with a certain name prefix, specify that name prefix here.
" + }, + "integrationType":{ + "shape":"IntegrationType", + "documentation":"To limit the results to integrations of a certain type, specify that type here.
" + }, + "integrationStatus":{ + "shape":"IntegrationStatus", + "documentation":"To limit the results to integrations with a certain status, specify that status here.
" + } + } + }, + "ListIntegrationsResponse":{ + "type":"structure", + "members":{ + "integrationSummaries":{ + "shape":"IntegrationSummaries", + "documentation":"An array, where each object in the array contains information about one CloudWatch Logs integration in this account.
" + } + } + }, "ListLogAnomalyDetectorsLimit":{ "type":"integer", "max":50, @@ -4547,6 +4767,257 @@ "max":128, "min":1 }, + "OpenSearchApplication":{ + "type":"structure", + "members":{ + "applicationEndpoint":{ + "shape":"OpenSearchApplicationEndpoint", + "documentation":"The endpoint of the application.
" + }, + "applicationArn":{ + "shape":"Arn", + "documentation":"The Amazon Resource Name (ARN) of the application.
" + }, + "applicationId":{ + "shape":"OpenSearchApplicationId", + "documentation":"The ID of the application.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service application used for this integration. An OpenSearch Service application is the web application created by the integration with CloudWatch Logs. It hosts the vended logs dashboards.
" + }, + "OpenSearchApplicationEndpoint":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^https://[\\.\\-_/#:A-Za-z0-9]+\\.com$" + }, + "OpenSearchApplicationId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "OpenSearchCollection":{ + "type":"structure", + "members":{ + "collectionEndpoint":{ + "shape":"OpenSearchCollectionEndpoint", + "documentation":"The endpoint of the collection.
" + }, + "collectionArn":{ + "shape":"Arn", + "documentation":"The ARN of the collection.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service collection used for this integration. An OpenSearch Service collection is a logical grouping of one or more indexes that represent an analytics workload. For more information, see Creating and managing OpenSearch Service Serverless collections.
" + }, + "OpenSearchCollectionEndpoint":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^https://[\\.\\-_/#:A-Za-z0-9]+\\.com$" + }, + "OpenSearchDataAccessPolicy":{ + "type":"structure", + "members":{ + "policyName":{ + "shape":"OpenSearchPolicyName", + "documentation":"The name of the data access policy.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service data access policy used for this integration. The access policy defines the access controls for the collection. This data access policy was automatically created as part of the integration setup. For more information about OpenSearch Service data access policies, see Data access control for Amazon OpenSearch Serverless in the OpenSearch Service Developer Guide.
" + }, + "OpenSearchDataSource":{ + "type":"structure", + "members":{ + "dataSourceName":{ + "shape":"OpenSearchDataSourceName", + "documentation":"The name of the OpenSearch Service data source.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service data source used for this integration. This data source was created as part of the integration setup. An OpenSearch Service data source defines the source and destination for OpenSearch Service queries. It includes the role required to execute queries and write to collections.
For more information about OpenSearch Service data sources , see Creating OpenSearch Service data source integrations with Amazon S3.
" + }, + "OpenSearchDataSourceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "OpenSearchEncryptionPolicy":{ + "type":"structure", + "members":{ + "policyName":{ + "shape":"OpenSearchPolicyName", + "documentation":"The name of the encryption policy.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service encryption policy used for this integration. The encryption policy was created automatically when you created the integration. For more information, see Encryption policies in the OpenSearch Service Developer Guide.
" + }, + "OpenSearchIntegrationDetails":{ + "type":"structure", + "members":{ + "dataSource":{ + "shape":"OpenSearchDataSource", + "documentation":"This structure contains information about the OpenSearch Service data source used for this integration. This data source was created as part of the integration setup. An OpenSearch Service data source defines the source and destination for OpenSearch Service queries. It includes the role required to execute queries and write to collections.
For more information about OpenSearch Service data sources , see Creating OpenSearch Service data source integrations with Amazon S3.
" + }, + "application":{ + "shape":"OpenSearchApplication", + "documentation":"This structure contains information about the OpenSearch Service application used for this integration. An OpenSearch Service application is the web application that was created by the integration with CloudWatch Logs. It hosts the vended logs dashboards.
" + }, + "collection":{ + "shape":"OpenSearchCollection", + "documentation":"This structure contains information about the OpenSearch Service collection used for this integration. This collection was created as part of the integration setup. An OpenSearch Service collection is a logical grouping of one or more indexes that represent an analytics workload. For more information, see Creating and managing OpenSearch Service Serverless collections.
" + }, + "workspace":{ + "shape":"OpenSearchWorkspace", + "documentation":"This structure contains information about the OpenSearch Service workspace used for this integration. An OpenSearch Service workspace is the collection of dashboards along with other OpenSearch Service tools. This workspace was created automatically as part of the integration setup. For more information, see Centralized OpenSearch user interface (Dashboards) with OpenSearch Service.
" + }, + "encryptionPolicy":{ + "shape":"OpenSearchEncryptionPolicy", + "documentation":"This structure contains information about the OpenSearch Service encryption policy used for this integration. The encryption policy was created automatically when you created the integration. For more information, see Encryption policies in the OpenSearch Service Developer Guide.
" + }, + "networkPolicy":{ + "shape":"OpenSearchNetworkPolicy", + "documentation":"This structure contains information about the OpenSearch Service network policy used for this integration. The network policy assigns network access settings to collections. For more information, see Network policies in the OpenSearch Service Developer Guide.
" + }, + "accessPolicy":{ + "shape":"OpenSearchDataAccessPolicy", + "documentation":"This structure contains information about the OpenSearch Service data access policy used for this integration. The access policy defines the access controls for the collection. This data access policy was automatically created as part of the integration setup. For more information about OpenSearch Service data access policies, see Data access control for Amazon OpenSearch Serverless in the OpenSearch Service Developer Guide.
" + }, + "lifecyclePolicy":{ + "shape":"OpenSearchLifecyclePolicy", + "documentation":"This structure contains information about the OpenSearch Service data lifecycle policy used for this integration. The lifecycle policy determines the lifespan of the data in the collection. It was automatically created as part of the integration setup.
For more information, see Using data lifecycle policies with OpenSearch Service Serverless in the OpenSearch Service Developer Guide.
" + } + }, + "documentation":"This structure contains complete information about one CloudWatch Logs integration. This structure is returned by a GetIntegration operation.
" + }, + "OpenSearchLifecyclePolicy":{ + "type":"structure", + "members":{ + "policyName":{ + "shape":"OpenSearchPolicyName", + "documentation":"The name of the lifecycle policy.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service data lifecycle policy used for this integration. The lifecycle policy determines the lifespan of the data in the collection. It was automatically created as part of the integration setup.
For more information, see Using data lifecycle policies with OpenSearch Service Serverless in the OpenSearch Service Developer Guide.
" + }, + "OpenSearchNetworkPolicy":{ + "type":"structure", + "members":{ + "policyName":{ + "shape":"OpenSearchPolicyName", + "documentation":"The name of the network policy.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of this OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service network policy used for this integration. The network policy assigns network access settings to collections. For more information, see Network policies in the OpenSearch Service Developer Guide.
" + }, + "OpenSearchPolicyName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "OpenSearchResourceConfig":{ + "type":"structure", + "required":[ + "dataSourceRoleArn", + "dashboardViewerPrincipals", + "retentionDays" + ], + "members":{ + "kmsKeyArn":{ + "shape":"Arn", + "documentation":"To have the vended dashboard data encrypted with KMS instead of the CloudWatch Logs default encryption method, specify the ARN of the KMS key that you want to use.
" + }, + "dataSourceRoleArn":{ + "shape":"Arn", + "documentation":"Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Create an IAM role to access the OpenSearch Service collection in the CloudWatch Logs User Guide.
" + }, + "dashboardViewerPrincipals":{ + "shape":"DashboardViewerPrincipals", + "documentation":"Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.
In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardsAccess IAM policy. For more information, see
If you want to use an existing OpenSearch Service application for your integration with OpenSearch Service, specify it here. If you omit this, a new application will be created.
" + }, + "retentionDays":{ + "shape":"CollectionRetentionDays", + "documentation":"Specify how many days that you want the data derived by OpenSearch Service to be retained in the index that the dashboard refers to. This also sets the maximum time period that you can choose when viewing data in the dashboard. Choosing a longer time frame will incur additional costs.
" + } + }, + "documentation":"This structure contains configuration details about an integration between CloudWatch Logs and OpenSearch Service.
" + }, + "OpenSearchResourceStatus":{ + "type":"structure", + "members":{ + "status":{ + "shape":"OpenSearchResourceStatusType", + "documentation":"The current status of this resource.
" + }, + "statusMessage":{ + "shape":"IntegrationStatusMessage", + "documentation":"A message with additional information about the status of this resource.
" + } + }, + "documentation":"This structure contains information about the status of an OpenSearch Service resource.
" + }, + "OpenSearchResourceStatusType":{ + "type":"string", + "enum":[ + "ACTIVE", + "NOT_FOUND", + "ERROR" + ] + }, + "OpenSearchWorkspace":{ + "type":"structure", + "members":{ + "workspaceId":{ + "shape":"OpenSearchWorkspaceId", + "documentation":"The ID of this workspace.
" + }, + "status":{ + "shape":"OpenSearchResourceStatus", + "documentation":"This structure contains information about the status of an OpenSearch Service resource.
" + } + }, + "documentation":"This structure contains information about the OpenSearch Service workspace used for this integration. An OpenSearch Service workspace is the collection of dashboards along with other OpenSearch Service tools. This workspace was created automatically as part of the integration setup. For more information, see Centralized OpenSearch user interface (Dashboards) with OpenSearch Service.
" + }, + "OpenSearchWorkspaceId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, "OperationAbortedException":{ "type":"structure", "members":{ @@ -5128,6 +5599,41 @@ } } }, + "PutIntegrationRequest":{ + "type":"structure", + "required":[ + "integrationName", + "resourceConfig", + "integrationType" + ], + "members":{ + "integrationName":{ + "shape":"IntegrationName", + "documentation":"A name for the integration.
" + }, + "resourceConfig":{ + "shape":"ResourceConfig", + "documentation":"A structure that contains configuration information for the integration that you are creating.
" + }, + "integrationType":{ + "shape":"IntegrationType", + "documentation":"The type of integration. Currently, the only supported type is OPENSEARCH
.
The name of the integration that you just created.
" + }, + "integrationStatus":{ + "shape":"IntegrationStatus", + "documentation":"The status of the integration that you just created.
After you create an integration, it takes a few minutes to complete. During this time, you'll see the status as PROVISIONING
.
Specify the query language to use for this query. The options are Logs Insights QL, OpenSearch PPL, and OpenSearch SQL. For more information about the query languages that CloudWatch Logs supports, see Supported query languages.
" + }, "name":{ "shape":"QueryDefinitionName", "documentation":"A name for the query definition. If you are saving numerous query definitions, we recommend that you name them. This way, you can find the ones you want by using the first part of the name as a filter in the queryDefinitionNamePrefix
parameter of DescribeQueryDefinitions.
Use this parameter to include specific log groups as part of your query definition.
If you are updating a query definition and you omit this parameter, then the updated definition will contain no log groups.
" + "documentation":"Use this parameter to include specific log groups as part of your query definition. If your query uses the OpenSearch Service query language, you specify the log group names inside the querystring
instead of here.
If you are updating an existing query definition for the Logs Insights QL or OpenSearch Service PPL and you omit this parameter, then the updated definition will contain no log groups.
" }, "queryString":{ "shape":"QueryDefinitionString", @@ -5369,6 +5879,10 @@ "QueryDefinition":{ "type":"structure", "members":{ + "queryLanguage":{ + "shape":"QueryLanguage", + "documentation":"The query language used for this query. For more information about the query languages that CloudWatch Logs supports, see Supported query languages.
" + }, "queryDefinitionId":{ "shape":"QueryId", "documentation":"The unique ID of the query definition.
" @@ -5414,6 +5928,10 @@ "QueryInfo":{ "type":"structure", "members":{ + "queryLanguage":{ + "shape":"QueryLanguage", + "documentation":"The query language used for this query. For more information about the query languages that CloudWatch Logs supports, see Supported query languages.
" + }, "queryId":{ "shape":"QueryId", "documentation":"The unique ID number of this query.
" @@ -5441,6 +5959,14 @@ "type":"list", "member":{"shape":"QueryInfo"} }, + "QueryLanguage":{ + "type":"string", + "enum":[ + "CWLI", + "SQL", + "PPL" + ] + }, "QueryListMaxResults":{ "type":"integer", "max":1000, @@ -5611,6 +6137,17 @@ "type":"list", "member":{"shape":"Arn"} }, + "ResourceConfig":{ + "type":"structure", + "members":{ + "openSearchResourceConfig":{ + "shape":"OpenSearchResourceConfig", + "documentation":"This structure contains configuration details about an integration between CloudWatch Logs and OpenSearch Service.
" + } + }, + "documentation":"This structure contains configuration details about an integration between CloudWatch Logs and another entity.
", + "union":true + }, "ResourceIdentifier":{ "type":"string", "max":2048, @@ -5909,17 +6446,21 @@ "queryString" ], "members":{ + "queryLanguage":{ + "shape":"QueryLanguage", + "documentation":"Specify the query language to use for this query. The options are Logs Insights QL, OpenSearch PPL, and OpenSearch SQL. For more information about the query languages that CloudWatch Logs supports, see Supported query languages.
" + }, "logGroupName":{ "shape":"LogGroupName", - "documentation":"The log group on which to perform the query.
" + "documentation":"The log group on which to perform the query.
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
, or logGroupIdentifiers
. The exception is queries using the OpenSearch Service SQL query language, where you specify the log group names inside the querystring
instead of here.
The list of log groups to be queried. You can include up to 50 log groups.
" + "documentation":"The list of log groups to be queried. You can include up to 50 log groups.
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
, or logGroupIdentifiers
. The exception is queries using the OpenSearch Service SQL query language, where you specify the log group names inside the querystring
instead of here.
The list of log groups to query. You can include up to 50 log groups.
You can specify them by the log group name or ARN. If a log group that you're querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account.
If you specify an ARN, use the format arn:aws:logs:region:account-id:log-group:log_group_name Don't include an * at the end.
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
, or logGroupIdentifiers
.
The list of log groups to query. You can include up to 50 log groups.
You can specify them by the log group name or ARN. If a log group that you're querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account.
If you specify an ARN, use the format arn:aws:logs:region:account-id:log-group:log_group_name Don't include an * at the end.
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
, or logGroupIdentifiers
. The exception is queries using the OpenSearch Service SQL query language, where you specify the log group names inside the querystring
instead of here.
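A hedged end-to-end sketch: start a query with an explicit query language and poll for the results. The log group, time range, and query string are placeholders, and the method names assume the standard SDK for Java 2.x codegen.

import java.time.Instant;
import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse;
import software.amazon.awssdk.services.cloudwatchlogs.model.QueryLanguage;
import software.amazon.awssdk.services.cloudwatchlogs.model.QueryStatus;
import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryRequest;

public class RunInsightsQuery {
    public static void main(String[] args) throws InterruptedException {
        try (CloudWatchLogsClient logs = CloudWatchLogsClient.create()) {
            String queryId = logs.startQuery(StartQueryRequest.builder()
                    .queryLanguage(QueryLanguage.CWLI)          // Logs Insights QL; SQL and PPL are also accepted
                    .logGroupNames("my-app-log-group")          // hypothetical log group
                    .startTime(Instant.now().minusSeconds(3600).getEpochSecond())
                    .endTime(Instant.now().getEpochSecond())
                    .queryString("fields @timestamp, @message | limit 10")
                    .build())
                    .queryId();

            // Poll until the query finishes; production code would add a timeout.
            GetQueryResultsResponse results;
            do {
                Thread.sleep(1000);
                results = logs.getQueryResults(GetQueryResultsRequest.builder().queryId(queryId).build());
            } while (results.status() == QueryStatus.RUNNING || results.status() == QueryStatus.SCHEDULED);

            results.results().forEach(row ->
                    row.forEach(field -> System.out.print(field.field() + "=" + field.value() + "  ")));
        }
    }
}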
The ID of the threat actor.
" + }, + "User":{ + "shape":"ActorUser", + "documentation":"Contains information about the user credentials used by the threat actor.
" + }, + "Session":{ + "shape":"ActorSession", + "documentation":"Contains information about the user session where the activity initiated.
" + } + }, + "documentation":"Information about the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, + "ActorSession":{ + "type":"structure", + "members":{ + "Uid":{ + "shape":"NonEmptyString", + "documentation":"Unique identifier of the session.
" + }, + "MfaStatus":{ + "shape":"ActorSessionMfaStatus", + "documentation":"Indicates whether multi-factor authentication (MFA) was used for authentication during the session.
In CloudTrail, you can find this value as userIdentity.sessionContext.attributes.mfaAuthenticated
.
The timestamp for when the session was created.
In CloudTrail, you can find this value as userIdentity.sessionContext.attributes.creationDate
.
The issuer of the session.
In CloudTrail, you can find this value as userIdentity.sessionContext.sessionIssuer.arn
.
Contains information about the authenticated session used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, + "ActorSessionMfaStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ActorUser":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"The name of the threat actor.
" + }, + "Uid":{ + "shape":"NonEmptyString", + "documentation":"The unique identifier of the threat actor.
" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"The type of user.
" + }, + "CredentialUid":{ + "shape":"NonEmptyString", + "documentation":"Unique identifier of the threat actor’s user credentials.
" + }, + "Account":{ + "shape":"UserAccount", + "documentation":"The account of the threat actor.
" + } + }, + "documentation":"Contains information about the credentials used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, + "ActorsList":{ + "type":"list", + "member":{"shape":"Actor"}, + "max":10, + "min":0 + }, "Adjustment":{ "type":"structure", "members":{ @@ -13878,6 +13957,10 @@ "AwsAccountName":{ "shape":"NonEmptyString", "documentation":"The name of the Amazon Web Services account from which a finding was generated.
Length Constraints: Minimum length of 1. Maximum length of 50.
" + }, + "Detection":{ + "shape":"Detection", + "documentation":"Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" } }, "documentation":"Provides a consistent format for Security Hub findings. AwsSecurityFinding
format allows you to share findings between Amazon Web Services security services and third-party solutions.
A finding is a potential security issue generated either by Amazon Web Services services or by the integrated third-party solutions and standards checks.
Provides details about an attack sequence.
" + } + }, + "documentation":"A top-level object field that provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, "DisableImportFindingsForProductRequest":{ "type":"structure", "required":["ProductSubscriptionArn"], @@ -17643,6 +17743,34 @@ "type":"list", "member":{"shape":"ImportFindingsError"} }, + "Indicator":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"NonEmptyString", + "documentation":"The name of the indicator that’s present in the attack sequence finding.
" + }, + "Values":{ + "shape":"NonEmptyStringList", + "documentation":"Values associated with each indicator key. For example, if the indicator key is SUSPICIOUS_NETWORK
, then the value will be the name of the network. If the indicator key is ATTACK_TACTIC
, then the value will be one of the MITRE tactics.
The title describing the indicator.
" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"The type of indicator.
" + } + }, + "documentation":"Contains information about the indicators observed in an Amazon GuardDuty Extended Threat Detection attack sequence. Indicators include a set of signals, which can be API activities or findings that GuardDuty uses to detect an attack sequence finding. GuardDuty generates an attack sequence finding when multiple signals align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, + "IndicatorsList":{ + "type":"list", + "member":{"shape":"Indicator"}, + "max":100, + "min":0 + }, "Insight":{ "type":"structure", "required":[ @@ -18476,6 +18604,30 @@ }, "documentation":"The details of network-related information about a finding.
" }, + "NetworkAutonomousSystem":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"The name associated with the AS.
" + }, + "Number":{ + "shape":"Integer", + "documentation":"The unique number that identifies the AS.
" + } + }, + "documentation":"Contains information about the Autonomous System (AS) of the network endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, + "NetworkConnection":{ + "type":"structure", + "members":{ + "Direction":{ + "shape":"ConnectionDirection", + "documentation":"The direction in which the network traffic is flowing.
" + } + }, + "documentation":"Contains information about the network connection involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, "NetworkConnectionAction":{ "type":"structure", "members":{ @@ -18513,6 +18665,68 @@ "OUT" ] }, + "NetworkEndpoint":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"NonEmptyString", + "documentation":"The identifier of the network endpoint involved in the attack sequence.
" + }, + "Ip":{ + "shape":"NonEmptyString", + "documentation":"The IP address used in the network endpoint.
" + }, + "Domain":{ + "shape":"NonEmptyString", + "documentation":"The domain information for the network endpoint.
" + }, + "Port":{ + "shape":"Integer", + "documentation":"The port number associated with the network endpoint.
" + }, + "Location":{ + "shape":"NetworkGeoLocation", + "documentation":"Information about the location of the network endpoint.
" + }, + "AutonomousSystem":{ + "shape":"NetworkAutonomousSystem", + "documentation":"The Autonomous System Number (ASN) of the network endpoint.
" + }, + "Connection":{ + "shape":"NetworkConnection", + "documentation":"Information about the network connection.
" + } + }, + "documentation":"Contains information about network endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
This field can provide information about the network endpoints associated with the resource in the attack sequence finding, or about a specific network endpoint used for the attack.
" + }, + "NetworkEndpointsList":{ + "type":"list", + "member":{"shape":"NetworkEndpoint"}, + "max":10, + "min":0 + }, + "NetworkGeoLocation":{ + "type":"structure", + "members":{ + "City":{ + "shape":"NonEmptyString", + "documentation":"The name of the city.
" + }, + "Country":{ + "shape":"NonEmptyString", + "documentation":"The name of the country.
" + }, + "Lat":{ + "shape":"Double", + "documentation":"The latitude information of the endpoint location.
" + }, + "Lon":{ + "shape":"Double", + "documentation":"The longitude information of the endpoint location.
" + } + }, + "documentation":"Contains information about the location of a network endpoint involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, "NetworkHeader":{ "type":"structure", "members":{ @@ -20320,6 +20534,32 @@ "type":"list", "member":{"shape":"SensitiveDataResult"} }, + "Sequence":{ + "type":"structure", + "members":{ + "Uid":{ + "shape":"NonEmptyString", + "documentation":"Unique identifier of the attack sequence.
" + }, + "Actors":{ + "shape":"ActorsList", + "documentation":"Provides information about the actors involved in the attack sequence.
" + }, + "Endpoints":{ + "shape":"NetworkEndpointsList", + "documentation":"Contains information about the network endpoints that were used in the attack sequence.
" + }, + "Signals":{ + "shape":"SignalsList", + "documentation":"Contains information about the signals involved in the attack sequence.
" + }, + "SequenceIndicators":{ + "shape":"IndicatorsList", + "documentation":" Contains information about the indicators observed in the attack sequence. The values for SignalIndicators are a subset of the values for SequenceIndicators
, but the values for these fields don't always match 1:1.
" + } + }, + "documentation":"Contains information about an Amazon GuardDuty Extended Threat Detection attack sequence finding. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
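Usage sketch only: reading attack-sequence details from findings with the generated Security Hub client. The accessor names (detection(), sequence(), signals()) follow the members added in this patch, and the unfiltered GetFindings call is purely for illustration.

import software.amazon.awssdk.services.securityhub.SecurityHubClient;
import software.amazon.awssdk.services.securityhub.model.AwsSecurityFinding;
import software.amazon.awssdk.services.securityhub.model.GetFindingsRequest;

public class PrintAttackSequences {
    public static void main(String[] args) {
        try (SecurityHubClient securityHub = SecurityHubClient.create()) {
            for (AwsSecurityFinding finding : securityHub.getFindings(GetFindingsRequest.builder().build()).findings()) {
                if (finding.detection() == null || finding.detection().sequence() == null) {
                    continue; // not an attack sequence finding
                }
                System.out.println("Attack sequence " + finding.detection().sequence().uid());
                finding.detection().sequence().signals().forEach(signal ->
                        System.out.println("  signal: " + signal.type() + " x" + signal.count()));
            }
        }
    }
}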
" + }, "Severity":{ "type":"structure", "members":{ @@ -20379,6 +20619,78 @@ }, "documentation":"Updates to the severity information for a finding.
" }, + "Signal":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"NonEmptyString", + "documentation":"The type of the signal used to identify an attack sequence.
Signals can be GuardDuty findings or activities observed in data sources that GuardDuty monitors. For more information, see GuardDuty foundational data sources in the Amazon GuardDuty User Guide.
A signal type can be one of the following values. Here are the related descriptions:
FINDING
- Individually generated GuardDuty finding.
CLOUD_TRAIL
- Activity observed from CloudTrail logs
S3_DATA_EVENTS
- Activity observed from CloudTrail data events for Amazon Simple Storage Service (S3). Activities associated with this type will show up only when you have enabled the GuardDuty S3 Protection feature in your account. For more information about S3 Protection and the steps to enable it, see S3 Protection in the Amazon GuardDuty User Guide.
The identifier of the signal.
" + }, + "Title":{ + "shape":"NonEmptyString", + "documentation":"The description of the GuardDuty finding.
" + }, + "ProductArn":{ + "shape":"NonEmptyString", + "documentation":"The Amazon Resource Name (ARN) of the product that generated the signal.
" + }, + "ResourceIds":{ + "shape":"NonEmptyStringList", + "documentation":"The ARN or ID of the Amazon Web Services resource associated with the signal.
" + }, + "SignalIndicators":{ + "shape":"IndicatorsList", + "documentation":" Contains information about the indicators associated with the signals in this attack sequence finding. The values for SignalIndicators
are a subset of the values for SequenceIndicators, but the values for these fields don't always match 1:1.
The name of the GuardDuty signal. For example, when signal type is FINDING
, the signal name is the name of the finding.
The timestamp when the first finding or activity related to this signal was observed.
" + }, + "UpdatedAt":{ + "shape":"Long", + "documentation":"The timestamp when this signal was last observed.
" + }, + "FirstSeenAt":{ + "shape":"Long", + "documentation":"The timestamp when the first finding or activity related to this signal was observed.
" + }, + "LastSeenAt":{ + "shape":"Long", + "documentation":"The timestamp when the last finding or activity related to this signal was observed.
" + }, + "Severity":{ + "shape":"Double", + "documentation":"The severity associated with the signal. For more information about severity, see Findings severity levels in the Amazon GuardDuty User Guide.
" + }, + "Count":{ + "shape":"Integer", + "documentation":"The number of times this signal was observed.
" + }, + "ActorIds":{ + "shape":"NonEmptyStringList", + "documentation":"The IDs of the threat actors involved in the signal.
" + }, + "EndpointIds":{ + "shape":"NonEmptyStringList", + "documentation":"Information about the endpoint IDs associated with this signal.
" + } + }, + "documentation":"Contains information about the signals involved in an Amazon GuardDuty Extended Threat Detection attack sequence. An attack sequence is a type of threat detected by GuardDuty. GuardDuty generates an attack sequence finding when multiple events, or signals, align to a potentially suspicious activity. When GuardDuty and Security Hub are integrated, GuardDuty sends attack sequence findings to Security Hub.
A signal can be an API activity or a finding that GuardDuty uses to detect an attack sequence finding.
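For the Signal shape itself, the accessor names below are what SDK for Java 2.x codegen would derive from the members in this model (Type, Severity, Count, ActorIds); treat the class location and method names as assumptions until the client is regenerated:

    import java.util.List;
    import software.amazon.awssdk.services.securityhub.model.Signal;

    public final class SignalSummary {
        private SignalSummary() {
        }

        // Prints a one-line summary for each signal in an attack sequence.
        public static void print(List<Signal> signals) {
            for (Signal signal : signals) {
                System.out.printf("type=%s severity=%s count=%s actors=%s%n",
                        signal.type(), signal.severity(), signal.count(), signal.actorIds());
            }
        }
    }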
" + }, + "SignalsList":{ + "type":"list", + "member":{"shape":"Signal"}, + "max":100, + "min":1 + }, "SizeBytes":{"type":"long"}, "SoftwarePackage":{ "type":"structure", @@ -20916,7 +21228,7 @@ "members":{ "ReasonCode":{ "shape":"NonEmptyString", - "documentation":"A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the Security Hub User Guide.
" + "documentation":"A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Compliance details for control findings in the Security Hub User Guide.
" }, "Description":{ "shape":"NonEmptyString", @@ -21656,6 +21968,20 @@ "UPDATING" ] }, + "UserAccount":{ + "type":"structure", + "members":{ + "Uid":{ + "shape":"NonEmptyString", + "documentation":"The unique identifier of the user account involved in the attack sequence.
" + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"The name of the user account involved in the attack sequence.
" + } + }, + "documentation":"Provides Amazon Web Services account information of the user involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.
" + }, "VerificationState":{ "type":"string", "enum":[ From cad124262d0c01347ac681027e4f73124a61c1e0 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:17 +0000 Subject: [PATCH 26/35] Agents for Amazon Bedrock Runtime Update: This release introduces a new Rerank API to leverage reranking models (with integration into Knowledge Bases); APIs to upload documents directly into Knowledge Base; RetrieveAndGenerateStream API for streaming response; Guardrails on Retrieve API; and ability to automatically generate filters --- ...AgentsforAmazonBedrockRuntime-70183c9.json | 6 + .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 644 +++++++++++++++++- 3 files changed, 652 insertions(+), 3 deletions(-) create mode 100644 .changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json diff --git a/.changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json b/.changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json new file mode 100644 index 00000000000..c6fbb0457dc --- /dev/null +++ b/.changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Agents for Amazon Bedrock Runtime", + "contributor": "", + "description": "This release introduces a new Rerank API to leverage reranking models (with integration into Knowledge Bases); APIs to upload documents directly into Knowledge Base; RetrieveAndGenerateStream API for streaming response; Guardrails on Retrieve API; and ability to automatically generate filters" +} diff --git a/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json b/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json index 3b9d74ec369..2743a90d531 100644 --- a/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json +++ b/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,11 @@ "limit_key": "maxItems", "result_key": "memoryContents" }, + "Rerank": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "results" + }, "Retrieve": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json b/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json index fc063540de7..aa4106dffc0 100644 --- a/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json @@ -144,6 +144,28 @@ ], "documentation":"Optimizes a prompt for the task that you specify. For more information, see Optimize a prompt in the Amazon Bedrock User Guide.
" }, + "Rerank":{ + "name":"Rerank", + "http":{ + "method":"POST", + "requestUri":"/rerank", + "responseCode":200 + }, + "input":{"shape":"RerankRequest"}, + "output":{"shape":"RerankResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"Reranks the relevance of sources based on queries. For more information, see Improve the relevance of query responses with a reranker model.
" + }, "Retrieve":{ "name":"Retrieve", "http":{ @@ -187,6 +209,28 @@ {"shape":"ServiceQuotaExceededException"} ], "documentation":"Queries a knowledge base and generates responses based on the retrieved results and using the specified foundation model or inference profile. The response only cites sources that are relevant to the query.
" + }, + "RetrieveAndGenerateStream":{ + "name":"RetrieveAndGenerateStream", + "http":{ + "method":"POST", + "requestUri":"/retrieveAndGenerateStream", + "responseCode":200 + }, + "input":{"shape":"RetrieveAndGenerateStreamRequest"}, + "output":{"shape":"RetrieveAndGenerateStreamResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"Queries a knowledge base and generates responses based on the retrieved results, with output in streaming format.
The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream
.
Contains information about the API operation that was called from the action group and the response body that was returned.
This data type is used in the following API operations:
In the returnControlInvocationResults
of the InvokeAgent request
Contains configurations for a reranker model.
" + }, + "numberOfResults":{ + "shape":"BedrockRerankingConfigurationNumberOfResultsInteger", + "documentation":"The number of results to return after reranking.
" + } + }, + "documentation":"Contains configurations for an Amazon Bedrock reranker model.
" + }, + "BedrockRerankingConfigurationNumberOfResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "BedrockRerankingModelArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/(.*))?$" + }, + "BedrockRerankingModelConfiguration":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "additionalModelRequestFields":{ + "shape":"AdditionalModelRequestFields", + "documentation":"A JSON object whose keys are request fields for the model and whose values are values for those fields.
" + }, + "modelArn":{ + "shape":"BedrockModelArn", + "documentation":"The ARN of the reranker model.
" + } + }, + "documentation":"Contains configurations for a reranker model.
" + }, "Boolean":{ "type":"boolean", "box":true @@ -592,6 +687,17 @@ }, "documentation":"An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.
This data type is used in the following API operations:
InvokeAgent response – in the citations
field
RetrieveAndGenerate response – in the citations
field
The citation.
" + } + }, + "documentation":"A citation event.
", + "event":true + }, "Citations":{ "type":"list", "member":{"shape":"Citation"} @@ -686,7 +792,7 @@ "members":{ "event":{ "shape":"CustomOrchestrationTraceEvent", - "documentation":"The trace event details used with the custom orchestration.
" + "documentation":"The event details used with the custom orchestration.
" }, "traceId":{ "shape":"TraceId", @@ -704,7 +810,7 @@ "documentation":"The text that prompted the event at this step.
" } }, - "documentation":"The event in the custom orchestration sequence.
", + "documentation":"The event in the custom orchestration sequence. Events are the responses which the custom orchestration Lambda function sends as response to the agent.
", "sensitive":true }, "DateTimestamp":{ @@ -872,6 +978,29 @@ "documentation":"Contains information about the failure of the interaction.
", "sensitive":true }, + "FieldForReranking":{ + "type":"structure", + "required":["fieldName"], + "members":{ + "fieldName":{ + "shape":"FieldForRerankingFieldNameString", + "documentation":"The name of a metadata field to include in or exclude from consideration when reranking.
" + } + }, + "documentation":"Contains information for a metadata field to include in or exclude from consideration when reranking.
" + }, + "FieldForRerankingFieldNameString":{ + "type":"string", + "max":2000, + "min":1 + }, + "FieldsForReranking":{ + "type":"list", + "member":{"shape":"FieldForReranking"}, + "max":100, + "min":1, + "sensitive":true + }, "FileBody":{ "type":"blob", "max":1000000, @@ -969,6 +1098,10 @@ "type":"string", "sensitive":true }, + "Float":{ + "type":"float", + "box":true + }, "FlowAliasIdentifier":{ "type":"string", "max":2048, @@ -1704,6 +1837,17 @@ "member":{"shape":"GuardrailCustomWord"}, "sensitive":true }, + "GuardrailEvent":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuadrailAction", + "documentation":"The guardrail action.
" + } + }, + "documentation":"A guardrail event.
", + "event":true + }, "GuardrailIdentifierWithArn":{ "type":"string", "max":2048, @@ -1945,6 +2089,24 @@ "min":1, "sensitive":true }, + "ImplicitFilterConfiguration":{ + "type":"structure", + "required":[ + "metadataAttributes", + "modelArn" + ], + "members":{ + "metadataAttributes":{ + "shape":"MetadataAttributeSchemaList", + "documentation":"Metadata that can be used in a filter.
" + }, + "modelArn":{ + "shape":"BedrockModelArn", + "documentation":"The model that generates the filter.
" + } + }, + "documentation":"Settings for implicit filtering, where a model generates a metadata filter based on the prompt.
" + }, "InferenceConfig":{ "type":"structure", "members":{ @@ -2655,6 +2817,10 @@ "shape":"RetrievalFilter", "documentation":"Specifies the filters to use on the metadata in the knowledge base data sources before returning results. For more information, see Query configurations.
" }, + "implicitFilterConfiguration":{ + "shape":"ImplicitFilterConfiguration", + "documentation":"Settings for implicit filtering.
" + }, "numberOfResults":{ "shape":"KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger", "documentation":"The number of source chunks to retrieve.
", @@ -2663,6 +2829,10 @@ "overrideSearchType":{ "shape":"SearchType", "documentation":"By default, Amazon Bedrock decides a search strategy for you. If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID
search using both vector embeddings and raw text, or SEMANTIC
search using only vector embeddings. For other vector store configurations, only SEMANTIC
search is available. For more information, see Test a knowledge base.
Contains configurations for reranking the retrieved results. For more information, see Improve the relevance of query responses with a reranker model.
" } }, "documentation":"Configurations for how to perform the search query and return results. For more information, see Query configurations.
This data type is used in the following API operations:
Retrieve request – in the vectorSearchConfiguration
field
RetrieveAndGenerate request – in the vectorSearchConfiguration
field
Provides details of the foundation model.
", "sensitive":true }, + "MetadataAttributeSchema":{ + "type":"structure", + "required":[ + "description", + "key", + "type" + ], + "members":{ + "description":{ + "shape":"MetadataAttributeSchemaDescriptionString", + "documentation":"The attribute's description.
" + }, + "key":{ + "shape":"MetadataAttributeSchemaKeyString", + "documentation":"The attribute's key.
" + }, + "type":{ + "shape":"AttributeType", + "documentation":"The attribute's type.
" + } + }, + "documentation":"Details about a metadata attribute.
", + "sensitive":true + }, + "MetadataAttributeSchemaDescriptionString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[\\s\\S]+$" + }, + "MetadataAttributeSchemaKeyString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\s\\S]+$" + }, + "MetadataAttributeSchemaList":{ + "type":"list", + "member":{"shape":"MetadataAttributeSchema"}, + "max":25, + "min":1 + }, + "MetadataConfigurationForReranking":{ + "type":"structure", + "required":["selectionMode"], + "members":{ + "selectionMode":{ + "shape":"RerankingMetadataSelectionMode", + "documentation":"Specifies whether to consider all metadata when reranking, or only the metadata that you select. If you specify SELECTIVE
, include the selectiveModeConfiguration
field.
Contains configurations for the metadata fields to include or exclude when considering reranking.
" + } + }, + "documentation":"Contains configurations for the metadata to use in reranking.
" + }, "MimeType":{"type":"string"}, "ModelIdentifier":{ "type":"string", @@ -3455,6 +3682,225 @@ "DISABLED" ] }, + "RerankDocument":{ + "type":"structure", + "required":["type"], + "members":{ + "jsonDocument":{ + "shape":"Document", + "documentation":"Contains a JSON document to rerank.
" + }, + "textDocument":{ + "shape":"RerankTextDocument", + "documentation":"Contains information about a text document to rerank.
" + }, + "type":{ + "shape":"RerankDocumentType", + "documentation":"The type of document to rerank.
" + } + }, + "documentation":"Contains information about a document to rerank. Choose the type
to define and include the field that corresponds to the type.
Contains information about a text query.
" + }, + "type":{ + "shape":"RerankQueryContentType", + "documentation":"The type of the query.
" + } + }, + "documentation":"Contains information about a query to submit to the reranker model.
", + "sensitive":true + }, + "RerankQueryContentType":{ + "type":"string", + "enum":["TEXT"] + }, + "RerankRequest":{ + "type":"structure", + "required":[ + "queries", + "rerankingConfiguration", + "sources" + ], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"If the total number of results was greater than could fit in a response, a token is returned in the nextToken
field. You can enter that token in this field to return the next batch of results.
An array of objects, each of which contains information about a query to submit to the reranker model.
" + }, + "rerankingConfiguration":{ + "shape":"RerankingConfiguration", + "documentation":"Contains configurations for reranking.
" + }, + "sources":{ + "shape":"RerankSourcesList", + "documentation":"An array of objects, each of which contains information about the sources to rerank.
" + } + } + }, + "RerankResponse":{ + "type":"structure", + "required":["results"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"If the total number of results is greater than can fit in the response, use this token in the nextToken
field when making another request to return the next batch of results.
An array of objects, each of which contains information about the results of reranking.
" + } + } + }, + "RerankResult":{ + "type":"structure", + "required":[ + "index", + "relevanceScore" + ], + "members":{ + "document":{ + "shape":"RerankDocument", + "documentation":"Contains information about the document.
" + }, + "index":{ + "shape":"RerankResultIndexInteger", + "documentation":"The ranking of the document. The lower a number, the higher the document is ranked.
" + }, + "relevanceScore":{ + "shape":"Float", + "documentation":"The relevance score of the document.
" + } + }, + "documentation":"Contains information about a document that was reranked.
" + }, + "RerankResultIndexInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":0 + }, + "RerankResultsList":{ + "type":"list", + "member":{"shape":"RerankResult"} + }, + "RerankSource":{ + "type":"structure", + "required":[ + "inlineDocumentSource", + "type" + ], + "members":{ + "inlineDocumentSource":{ + "shape":"RerankDocument", + "documentation":"Contains an inline definition of a source for reranking.
" + }, + "type":{ + "shape":"RerankSourceType", + "documentation":"The type of the source.
" + } + }, + "documentation":"Contains information about a source for reranking.
", + "sensitive":true + }, + "RerankSourceType":{ + "type":"string", + "enum":["INLINE"] + }, + "RerankSourcesList":{ + "type":"list", + "member":{"shape":"RerankSource"}, + "max":1000, + "min":1, + "sensitive":true + }, + "RerankTextDocument":{ + "type":"structure", + "members":{ + "text":{ + "shape":"RerankTextDocumentTextString", + "documentation":"The text of the document.
" + } + }, + "documentation":"Contains information about a text document to rerank.
", + "sensitive":true + }, + "RerankTextDocumentTextString":{ + "type":"string", + "max":9000, + "min":1 + }, + "RerankingConfiguration":{ + "type":"structure", + "required":[ + "bedrockRerankingConfiguration", + "type" + ], + "members":{ + "bedrockRerankingConfiguration":{ + "shape":"BedrockRerankingConfiguration", + "documentation":"Contains configurations for an Amazon Bedrock reranker.
" + }, + "type":{ + "shape":"RerankingConfigurationType", + "documentation":"The type of reranker that the configurations apply to.
" + } + }, + "documentation":"Contains configurations for reranking.
" + }, + "RerankingConfigurationType":{ + "type":"string", + "enum":["BEDROCK_RERANKING_MODEL"] + }, + "RerankingMetadataSelectionMode":{ + "type":"string", + "enum":[ + "SELECTIVE", + "ALL" + ] + }, + "RerankingMetadataSelectiveModeConfiguration":{ + "type":"structure", + "members":{ + "fieldsToExclude":{ + "shape":"FieldsForReranking", + "documentation":"An array of objects, each of which specifies a metadata field to exclude from consideration when reranking.
" + }, + "fieldsToInclude":{ + "shape":"FieldsForReranking", + "documentation":"An array of objects, each of which specifies a metadata field to include in consideration when reranking. The remaining metadata fields are ignored.
" + } + }, + "documentation":"Contains configurations for the metadata fields to include or exclude when considering reranking. If you include the fieldsToExclude
field, the reranker ignores all the metadata fields that you specify. If you include the fieldsToInclude
field, the reranker uses only the metadata fields that you specify and ignores all others. You can include only one of these fields.
/ @documentation("Description of the using the resource.")
", @@ -3637,6 +4083,16 @@ "documentation":"Contains the cited text from the data source.
This data type is used in the following API operations:
Retrieve response – in the content
field
RetrieveAndGenerate response – in the content
field
InvokeAgent response – in the content
field
The ID of the document.
" + } + }, + "documentation":"Contains information about the location of a document in a custom data source.
" + }, "RetrievalResultLocation":{ "type":"structure", "required":["type"], @@ -3645,6 +4101,10 @@ "shape":"RetrievalResultConfluenceLocation", "documentation":"The Confluence data source location.
" }, + "customDocumentLocation":{ + "shape":"RetrievalResultCustomDocumentLocation", + "documentation":"Specifies the location of a document in a custom data source.
" + }, "s3Location":{ "shape":"RetrievalResultS3Location", "documentation":"The S3 data source location.
" @@ -3676,7 +4136,8 @@ "WEB", "CONFLUENCE", "SALESFORCE", - "SHAREPOINT" + "SHAREPOINT", + "CUSTOM" ] }, "RetrievalResultMetadata":{ @@ -3785,6 +4246,19 @@ "documentation":"Contains the response generated from querying the knowledge base.
This data type is used in the following API operations:
RetrieveAndGenerate response – in the output
field
A text response.
" + } + }, + "documentation":"A retrieve and generate output event.
", + "event":true, + "sensitive":true + }, "RetrieveAndGenerateRequest":{ "type":"structure", "required":["input"], @@ -3843,6 +4317,103 @@ }, "documentation":"Contains configuration about the session with the knowledge base.
This data type is used in the following API operations:
RetrieveAndGenerate request – in the sessionConfiguration
field
Contains the query to be made to the knowledge base.
" + }, + "retrieveAndGenerateConfiguration":{ + "shape":"RetrieveAndGenerateConfiguration", + "documentation":"Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
" + }, + "sessionConfiguration":{ + "shape":"RetrieveAndGenerateSessionConfiguration", + "documentation":"Contains details about the session with the knowledge base.
" + }, + "sessionId":{ + "shape":"SessionId", + "documentation":"The unique identifier of the session. When you first make a RetrieveAndGenerate
request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId
yourself.
The session ID.
", + "location":"header", + "locationName":"x-amzn-bedrock-knowledge-base-session-id" + }, + "stream":{ + "shape":"RetrieveAndGenerateStreamResponseOutput", + "documentation":"A stream of events from the model.
" + } + }, + "payload":"stream" + }, + "RetrieveAndGenerateStreamResponseOutput":{ + "type":"structure", + "members":{ + "accessDeniedException":{ + "shape":"AccessDeniedException", + "documentation":"The request is denied because you do not have sufficient permissions to perform the requested action. For troubleshooting this error, see AccessDeniedException in the Amazon Bedrock User Guide.
" + }, + "badGatewayException":{ + "shape":"BadGatewayException", + "documentation":"The request failed due to a bad gateway error.
" + }, + "citation":{ + "shape":"CitationEvent", + "documentation":"A citation event.
" + }, + "conflictException":{ + "shape":"ConflictException", + "documentation":"Error occurred because of a conflict while performing an operation.
" + }, + "dependencyFailedException":{ + "shape":"DependencyFailedException", + "documentation":"The request failed due to a dependency error.
" + }, + "guardrail":{ + "shape":"GuardrailEvent", + "documentation":"A guardrail event.
" + }, + "internalServerException":{ + "shape":"InternalServerException", + "documentation":"An internal server error occurred. Retry your request.
" + }, + "output":{ + "shape":"RetrieveAndGenerateOutputEvent", + "documentation":"An output event.
" + }, + "resourceNotFoundException":{ + "shape":"ResourceNotFoundException", + "documentation":"The specified resource ARN was not found. For troubleshooting this error, see ResourceNotFound in the Amazon Bedrock User Guide.
" + }, + "serviceQuotaExceededException":{ + "shape":"ServiceQuotaExceededException", + "documentation":"Your request exceeds the service quota for your account. You can view your quotas at Viewing service quotas. You can resubmit your request later.
" + }, + "throttlingException":{ + "shape":"ThrottlingException", + "documentation":"Your request was denied due to exceeding the account quotas for Amazon Bedrock. For troubleshooting this error, see ThrottlingException in the Amazon Bedrock User Guide.
" + }, + "validationException":{ + "shape":"ValidationException", + "documentation":"The input fails to satisfy the constraints specified by Amazon Bedrock. For troubleshooting this error, see ValidationError in the Amazon Bedrock User Guide.
" + } + }, + "documentation":"A retrieve and generate stream response output.
", + "eventstream":true + }, "RetrieveAndGenerateType":{ "type":"string", "enum":[ @@ -3857,6 +4428,10 @@ "retrievalQuery" ], "members":{ + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"Guardrail settings.
" + }, "knowledgeBaseId":{ "shape":"KnowledgeBaseId", "documentation":"The unique identifier of the knowledge base to query.
", @@ -3881,6 +4456,10 @@ "type":"structure", "required":["retrievalResults"], "members":{ + "guardrailAction":{ + "shape":"GuadrailAction", + "documentation":"Specifies if there is a guardrail intervention in the response.
" + }, "nextToken":{ "shape":"NextToken", "documentation":"If there are more results than can fit in the response, the response returns a nextToken
. Use this token in the nextToken
field of another request to retrieve the next batch of results.
Contains configurations for the metadata to use in reranking.
" + }, + "modelConfiguration":{ + "shape":"VectorSearchBedrockRerankingModelConfiguration", + "documentation":"Contains configurations for the reranker model.
" + }, + "numberOfRerankedResults":{ + "shape":"VectorSearchBedrockRerankingConfigurationNumberOfRerankedResultsInteger", + "documentation":"The number of results to return after reranking.
" + } + }, + "documentation":"Contains configurations for reranking with an Amazon Bedrock reranker model.
" + }, + "VectorSearchBedrockRerankingConfigurationNumberOfRerankedResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "VectorSearchBedrockRerankingModelConfiguration":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "additionalModelRequestFields":{ + "shape":"AdditionalModelRequestFields", + "documentation":"A JSON object whose keys are request fields for the model and whose values are values for those fields.
" + }, + "modelArn":{ + "shape":"BedrockRerankingModelArn", + "documentation":"The ARN of the reranker model to use.
" + } + }, + "documentation":"Contains configurations for an Amazon Bedrock reranker model.
" + }, + "VectorSearchRerankingConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "bedrockRerankingConfiguration":{ + "shape":"VectorSearchBedrockRerankingConfiguration", + "documentation":"Contains configurations for an Amazon Bedrock reranker model.
" + }, + "type":{ + "shape":"VectorSearchRerankingConfigurationType", + "documentation":"The type of reranker model.
" + } + }, + "documentation":"Contains configurations for reranking the retrieved results.
" + }, + "VectorSearchRerankingConfigurationType":{ + "type":"string", + "enum":["BEDROCK_RERANKING_MODEL"] + }, "Verb":{ "type":"string", "sensitive":true From d92c008805d59d5d5b500217fd82266aa358a7d0 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:27 +0000 Subject: [PATCH 27/35] Security Incident Response Update: AWS Security Incident Response is a purpose-built security incident solution designed to help customers prepare for, respond to, and recover from security incidents. --- ...ture-SecurityIncidentResponse-214853b.json | 6 + services/securityir/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 137 + .../codegen-resources/endpoint-tests.json | 201 ++ .../codegen-resources/paginators-1.json | 28 + .../codegen-resources/service-2.json | 2331 +++++++++++++++++ .../codegen-resources/waiters-2.json | 5 + 7 files changed, 2768 insertions(+) create mode 100644 .changes/next-release/feature-SecurityIncidentResponse-214853b.json create mode 100644 services/securityir/pom.xml create mode 100644 services/securityir/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/securityir/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/securityir/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/securityir/src/main/resources/codegen-resources/service-2.json create mode 100644 services/securityir/src/main/resources/codegen-resources/waiters-2.json diff --git a/.changes/next-release/feature-SecurityIncidentResponse-214853b.json b/.changes/next-release/feature-SecurityIncidentResponse-214853b.json new file mode 100644 index 00000000000..6455b3f59f1 --- /dev/null +++ b/.changes/next-release/feature-SecurityIncidentResponse-214853b.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Security Incident Response", + "contributor": "", + "description": "AWS Security Incident Response is a purpose-built security incident solution designed to help customers prepare for, respond to, and recover from security incidents." +} diff --git a/services/securityir/pom.xml b/services/securityir/pom.xml new file mode 100644 index 00000000000..dfb55f109a1 --- /dev/null +++ b/services/securityir/pom.xml @@ -0,0 +1,60 @@ + +Grants permission to view an existing membership.
" + }, + "CancelMembership":{ + "name":"CancelMembership", + "http":{ + "method":"PUT", + "requestUri":"/v1/membership/{membershipId}", + "responseCode":200 + }, + "input":{"shape":"CancelMembershipRequest"}, + "output":{"shape":"CancelMembershipResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permissions to cancel an existing membership.
", + "idempotent":true + }, + "CloseCase":{ + "name":"CloseCase", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/close-case", + "responseCode":200 + }, + "input":{"shape":"CloseCaseRequest"}, + "output":{"shape":"CloseCaseResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to close an existing case.
" + }, + "CreateCase":{ + "name":"CreateCase", + "http":{ + "method":"POST", + "requestUri":"/v1/create-case", + "responseCode":201 + }, + "input":{"shape":"CreateCaseRequest"}, + "output":{"shape":"CreateCaseResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to create a new case.
", + "idempotent":true + }, + "CreateCaseComment":{ + "name":"CreateCaseComment", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/create-comment", + "responseCode":201 + }, + "input":{"shape":"CreateCaseCommentRequest"}, + "output":{"shape":"CreateCaseCommentResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to add a comment to an existing case.
", + "idempotent":true + }, + "CreateMembership":{ + "name":"CreateMembership", + "http":{ + "method":"POST", + "requestUri":"/v1/membership", + "responseCode":201 + }, + "input":{"shape":"CreateMembershipRequest"}, + "output":{"shape":"CreateMembershipResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permissions to create a new membership.
", + "idempotent":true + }, + "GetCase":{ + "name":"GetCase", + "http":{ + "method":"GET", + "requestUri":"/v1/cases/{caseId}/get-case", + "responseCode":200 + }, + "input":{"shape":"GetCaseRequest"}, + "output":{"shape":"GetCaseResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grant permission to view a designated case.
" + }, + "GetCaseAttachmentDownloadUrl":{ + "name":"GetCaseAttachmentDownloadUrl", + "http":{ + "method":"GET", + "requestUri":"/v1/cases/{caseId}/get-presigned-url/{attachmentId}", + "responseCode":201 + }, + "input":{"shape":"GetCaseAttachmentDownloadUrlRequest"}, + "output":{"shape":"GetCaseAttachmentDownloadUrlResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to obtain an Amazon S3 presigned URL to download an attachment.
" + }, + "GetCaseAttachmentUploadUrl":{ + "name":"GetCaseAttachmentUploadUrl", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/get-presigned-url", + "responseCode":201 + }, + "input":{"shape":"GetCaseAttachmentUploadUrlRequest"}, + "output":{"shape":"GetCaseAttachmentUploadUrlResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to upload an attachment to a case.
", + "idempotent":true + }, + "GetMembership":{ + "name":"GetMembership", + "http":{ + "method":"GET", + "requestUri":"/v1/membership/{membershipId}", + "responseCode":200 + }, + "input":{"shape":"GetMembershipRequest"}, + "output":{"shape":"GetMembershipResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to get details of a designated service membership.
" + }, + "ListCaseEdits":{ + "name":"ListCaseEdits", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/list-case-edits", + "responseCode":200 + }, + "input":{"shape":"ListCaseEditsRequest"}, + "output":{"shape":"ListCaseEditsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permissions to view the aidt log for edits made to a designated case.
" + }, + "ListCases":{ + "name":"ListCases", + "http":{ + "method":"POST", + "requestUri":"/v1/list-cases", + "responseCode":200 + }, + "input":{"shape":"ListCasesRequest"}, + "output":{"shape":"ListCasesResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to list all cases the requester has access to.
" + }, + "ListComments":{ + "name":"ListComments", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/list-comments", + "responseCode":200 + }, + "input":{"shape":"ListCommentsRequest"}, + "output":{"shape":"ListCommentsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permissions to list and view comments for a designated case.
" + }, + "ListMemberships":{ + "name":"ListMemberships", + "http":{ + "method":"POST", + "requestUri":"/v1/memberships", + "responseCode":200 + }, + "input":{"shape":"ListMembershipsRequest"}, + "output":{"shape":"ListMembershipsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to query the memberships a principal has access to.
" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/v1/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to view currently configured tags on a resource.
" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/v1/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to add a tag(s) to a designated resource.
" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/v1/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to remove a tag(s) from a designate resource.
", + "idempotent":true + }, + "UpdateCase":{ + "name":"UpdateCase", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/update-case", + "responseCode":200 + }, + "input":{"shape":"UpdateCaseRequest"}, + "output":{"shape":"UpdateCaseResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to update an existing case.
" + }, + "UpdateCaseComment":{ + "name":"UpdateCaseComment", + "http":{ + "method":"PUT", + "requestUri":"/v1/cases/{caseId}/update-case-comment/{commentId}", + "responseCode":200 + }, + "input":{"shape":"UpdateCaseCommentRequest"}, + "output":{"shape":"UpdateCaseCommentResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to update an existing case comment.
", + "idempotent":true + }, + "UpdateCaseStatus":{ + "name":"UpdateCaseStatus", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/update-case-status", + "responseCode":201 + }, + "input":{"shape":"UpdateCaseStatusRequest"}, + "output":{"shape":"UpdateCaseStatusResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to update the status for a designated cases. Options include Submitted | Detection and Analysis | Eradication, Containment and Recovery | Post-Incident Activities | Closed
.
Grants access to UpdateMembership to change membership configuration.
", + "idempotent":true + }, + "UpdateResolverType":{ + "name":"UpdateResolverType", + "http":{ + "method":"POST", + "requestUri":"/v1/cases/{caseId}/update-resolver-type", + "responseCode":200 + }, + "input":{"shape":"UpdateResolverTypeRequest"}, + "output":{"shape":"UpdateResolverTypeResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"SecurityIncidentResponseNotActiveException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"Grants permission to update the resolver type for a case.
This is a one-way action and cannot be reversed.
Options include self-supported > AWS-supported.
" + } + }, + "shapes":{ + "AWSAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, + "AWSAccountIds":{ + "type":"list", + "member":{"shape":"AWSAccountId"}, + "max":100, + "min":1 + }, + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Arn":{ + "type":"string", + "max":1010, + "min":12, + "pattern":"arn:aws:security-ir:\\w+?-\\w+?-\\d+:[0-9]{12}:(membership/m-[a-z0-9]{10,32}|case/[0-9]{10})" + }, + "AttachmentId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}" + }, + "AwsRegion":{ + "type":"string", + "enum":[ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-southeast-5", + "ca-central-1", + "ca-west-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "il-central-1", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2" + ] + }, + "AwsService":{ + "type":"string", + "max":50, + "min":3, + "pattern":"[a-zA-Z0-9 -.():]+" + }, + "BatchGetMemberAccountDetailsRequest":{ + "type":"structure", + "required":[ + "membershipId", + "accountIds" + ], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"Required element used in combination with BatchGetMemberAccountDetails to identify the membership ID to query.
", + "location":"uri", + "locationName":"membershipId" + }, + "accountIds":{ + "shape":"AWSAccountIds", + "documentation":"Optional element to query the membership relationship status to a provided list of account IDs.
" + } + } + }, + "BatchGetMemberAccountDetailsResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"GetMembershipAccountDetailItems", + "documentation":"The response element providing responses for requests to GetMembershipAccountDetails.
" + }, + "errors":{ + "shape":"GetMembershipAccountDetailErrors", + "documentation":"The response element providing errors messages for requests to GetMembershipAccountDetails.
" + } + } + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CancelMembershipRequest":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"Required element used in combination with CancelMembershipRequest to identify the membership ID to cancel.
", + "location":"uri", + "locationName":"membershipId" + } + } + }, + "CancelMembershipResponse":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"The response element providing responses for requests to CancelMembershipRequest.
" + } + } + }, + "CaseArn":{ + "type":"string", + "max":80, + "min":12, + "pattern":"arn:aws:security-ir:\\w+?-\\w+?-\\d+:[0-9]{12}:case/[0-9]{10}" + }, + "CaseAttachmentAttributes":{ + "type":"structure", + "required":[ + "attachmentId", + "fileName", + "attachmentStatus", + "creator", + "createdDate" + ], + "members":{ + "attachmentId":{ + "shape":"AttachmentId", + "documentation":"" + }, + "fileName":{ + "shape":"FileName", + "documentation":"" + }, + "attachmentStatus":{ + "shape":"CaseAttachmentStatus", + "documentation":"" + }, + "creator":{ + "shape":"PrincipalId", + "documentation":"" + }, + "createdDate":{ + "shape":"Timestamp", + "documentation":"" + } + }, + "documentation":"" + }, + "CaseAttachmentStatus":{ + "type":"string", + "enum":[ + "Verified", + "Failed", + "Pending" + ] + }, + "CaseAttachmentsList":{ + "type":"list", + "member":{"shape":"CaseAttachmentAttributes"}, + "max":50, + "min":0 + }, + "CaseDescription":{ + "type":"string", + "max":8000, + "min":1, + "sensitive":true + }, + "CaseEditAction":{ + "type":"string", + "max":100, + "min":1 + }, + "CaseEditItem":{ + "type":"structure", + "members":{ + "eventTimestamp":{ + "shape":"Timestamp", + "documentation":"" + }, + "principal":{ + "shape":"String", + "documentation":"" + }, + "action":{ + "shape":"CaseEditAction", + "documentation":"" + }, + "message":{ + "shape":"CaseEditMessage", + "documentation":"" + } + }, + "documentation":"" + }, + "CaseEditItems":{ + "type":"list", + "member":{"shape":"CaseEditItem"} + }, + "CaseEditMessage":{ + "type":"string", + "max":4096, + "min":10 + }, + "CaseId":{ + "type":"string", + "max":32, + "min":10, + "pattern":"\\d{10,32}.*" + }, + "CaseStatus":{ + "type":"string", + "enum":[ + "Submitted", + "Acknowledged", + "Detection and Analysis", + "Containment, Eradication and Recovery", + "Post-incident Activities", + "Ready to Close", + "Closed" + ] + }, + "CaseTitle":{ + "type":"string", + "max":300, + "min":1, + "sensitive":true + }, + "CloseCaseRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element used in combination with CloseCase to identify the case ID to close.
", + "location":"uri", + "locationName":"caseId" + } + } + }, + "CloseCaseResponse":{ + "type":"structure", + "members":{ + "caseStatus":{ + "shape":"CaseStatus", + "documentation":"A response element providing responses for requests to CloseCase. This element responds with the case status following the action.
" + }, + "closedDate":{ + "shape":"Timestamp", + "documentation":"A response element providing responses for requests to CloseCase. This element responds with the case closure date following the action.
" + } + } + }, + "ClosureCode":{ + "type":"string", + "enum":[ + "Investigation Completed", + "Not Resolved", + "False Positive", + "Duplicate" + ] + }, + "CommentBody":{ + "type":"string", + "max":12000, + "min":1, + "sensitive":true + }, + "CommentId":{ + "type":"string", + "max":6, + "min":6, + "pattern":"\\d{6}" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"Element providing the ID of the resource affected.
" + }, + "resourceType":{ + "shape":"String", + "documentation":"Element providing the type of the resource affected.
" + } + }, + "documentation":"", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ContentLength":{ + "type":"long", + "box":true, + "max":104857600, + "min":1 + }, + "CreateCaseCommentRequest":{ + "type":"structure", + "required":[ + "caseId", + "body" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element used in combination with CreateCaseComment to specify a case ID.
", + "location":"uri", + "locationName":"caseId" + }, + "clientToken":{ + "shape":"CreateCaseCommentRequestClientTokenString", + "documentation":"An optional element used in combination with CreateCaseComment.
", + "idempotencyToken":true + }, + "body":{ + "shape":"CommentBody", + "documentation":"Required element used in combination with CreateCaseComment to add content for the new comment.
" + } + } + }, + "CreateCaseCommentRequestClientTokenString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCaseCommentResponse":{ + "type":"structure", + "required":["commentId"], + "members":{ + "commentId":{ + "shape":"CommentId", + "documentation":"Response element indicating the new comment ID.
" + } + } + }, + "CreateCaseRequest":{ + "type":"structure", + "required":[ + "resolverType", + "title", + "description", + "engagementType", + "reportedIncidentStartDate", + "impactedAccounts", + "watchers" + ], + "members":{ + "clientToken":{ + "shape":"CreateCaseRequestClientTokenString", + "documentation":"Required element used in combination with CreateCase.
", + "idempotencyToken":true + }, + "resolverType":{ + "shape":"ResolverType", + "documentation":"Required element used in combination with CreateCase to identify the resolver type. Available resolvers include self-supported | aws-supported.
" + }, + "title":{ + "shape":"CaseTitle", + "documentation":"Required element used in combination with CreateCase to provide a title for the new case.
" + }, + "description":{ + "shape":"CaseDescription", + "documentation":"Required element used in combination with CreateCase to provide a description for the new case.
" + }, + "engagementType":{ + "shape":"EngagementType", + "documentation":"Required element used in combination with CreateCase to provide an engagement type for the new cases. Available engagement types include Security Incident | Investigation
" + }, + "reportedIncidentStartDate":{ + "shape":"Timestamp", + "documentation":"Required element used in combination with CreateCase to provide an initial start date for the unauthorized activity.
" + }, + "impactedAccounts":{ + "shape":"ImpactedAccounts", + "documentation":"Required element used in combination with CreateCase to provide a list of impacted accounts.
" + }, + "watchers":{ + "shape":"Watchers", + "documentation":"Required element used in combination with CreateCase to provide a list of entities to receive notifications for case updates.
" + }, + "threatActorIpAddresses":{ + "shape":"ThreatActorIpList", + "documentation":"An optional element used in combination with CreateCase to provide a list of suspicious internet protocol addresses associated with unauthorized activity.
" + }, + "impactedServices":{ + "shape":"ImpactedServicesList", + "documentation":"An optional element used in combination with CreateCase to provide a list of services impacted.
" + }, + "impactedAwsRegions":{ + "shape":"ImpactedAwsRegionList", + "documentation":"An optional element used in combination with CreateCase to provide a list of impacted regions.
" + }, + "tags":{ + "shape":"TagMap", + "documentation":"An optional element used in combination with CreateCase to add customer specified tags to a case.
" + } + } + }, + "CreateCaseRequestClientTokenString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCaseResponse":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"A response element providing responses for requests to CreateCase. This element responds with the case ID.
" + } + } + }, + "CreateMembershipRequest":{ + "type":"structure", + "required":[ + "membershipName", + "incidentResponseTeam" + ], + "members":{ + "clientToken":{ + "shape":"CreateMembershipRequestClientTokenString", + "documentation":"An optional element used in combination with CreateMembership.
", + "idempotencyToken":true + }, + "membershipName":{ + "shape":"MembershipName", + "documentation":"Required element use in combination with CreateMembership to create a name for the membership.
" + }, + "incidentResponseTeam":{ + "shape":"IncidentResponseTeam", + "documentation":"Required element use in combination with CreateMembership to add customer incident response team members and trusted partners to the membership.
" + }, + "optInFeatures":{ + "shape":"OptInFeatures", + "documentation":"Optional element to enable the monitoring and investigation opt-in features for the service.
" + }, + "tags":{ + "shape":"TagMap", + "documentation":"Optional element for customer configured tags.
" + } + } + }, + "CreateMembershipRequestClientTokenString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateMembershipResponse":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"Response element for CreateMembership providing the newly created membership ID.
" + } + } + }, + "CustomerType":{ + "type":"string", + "enum":[ + "Standalone", + "Organization" + ] + }, + "EmailAddress":{ + "type":"string", + "max":254, + "min":6, + "pattern":"[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*", + "sensitive":true + }, + "EngagementType":{ + "type":"string", + "enum":[ + "Security Incident", + "Investigation" + ] + }, + "FileName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9._-]+", + "sensitive":true + }, + "GetCaseAttachmentDownloadUrlRequest":{ + "type":"structure", + "required":[ + "caseId", + "attachmentId" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for GetCaseAttachmentDownloadUrl to identify the case ID for downloading an attachment from.
", + "location":"uri", + "locationName":"caseId" + }, + "attachmentId":{ + "shape":"AttachmentId", + "documentation":"Required element for GetCaseAttachmentDownloadUrl to identify the attachment ID for downloading an attachment.
", + "location":"uri", + "locationName":"attachmentId" + } + } + }, + "GetCaseAttachmentDownloadUrlResponse":{ + "type":"structure", + "required":["attachmentPresignedUrl"], + "members":{ + "attachmentPresignedUrl":{ + "shape":"Url", + "documentation":"Response element providing the Amazon S3 presigned URL to download an attachment.
" + } + } + }, + "GetCaseAttachmentUploadUrlRequest":{ + "type":"structure", + "required":[ + "caseId", + "fileName", + "contentLength" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for GetCaseAttachmentUploadUrl to identify the case ID for uploading an attachment to.
", + "location":"uri", + "locationName":"caseId" + }, + "fileName":{ + "shape":"FileName", + "documentation":"Required element for GetCaseAttachmentUploadUrl to identify the file name of the attachment to upload.
" + }, + "contentLength":{ + "shape":"ContentLength", + "documentation":"Required element for GetCaseAttachmentUploadUrl to identify the size od the file attachment.
" + }, + "clientToken":{ + "shape":"GetCaseAttachmentUploadUrlRequestClientTokenString", + "documentation":"Optional element for customer provided token.
", + "idempotencyToken":true + } + } + }, + "GetCaseAttachmentUploadUrlRequestClientTokenString":{ + "type":"string", + "max":255, + "min":1 + }, + "GetCaseAttachmentUploadUrlResponse":{ + "type":"structure", + "required":["attachmentPresignedUrl"], + "members":{ + "attachmentPresignedUrl":{ + "shape":"Url", + "documentation":"Response element providing the Amazon S3 presigned UTL to upload the attachment.
" + } + } + }, + "GetCaseRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for GetCase to identify the requested case ID.
", + "location":"uri", + "locationName":"caseId" + } + } + }, + "GetCaseResponse":{ + "type":"structure", + "members":{ + "title":{ + "shape":"CaseTitle", + "documentation":"Response element for GetCase that provides the case title.
" + }, + "caseArn":{ + "shape":"CaseArn", + "documentation":"Response element for GetCase that provides the case ARN
" + }, + "description":{ + "shape":"CaseDescription", + "documentation":"Response element for GetCase that provides contents of the case description.
" + }, + "caseStatus":{ + "shape":"CaseStatus", + "documentation":"Response element for GetCase that provides the case status. Options for statuses include Submitted | Detection and Analysis | Eradication, Containment and Recovery | Post-Incident Activities | Closed
." + }, + "engagementType":{ + "shape":"EngagementType", + "documentation":"Response element for GetCase that provides the engagement type. Options for engagement type include Active Security Event | Investigations.
" + }, + "reportedIncidentStartDate":{ + "shape":"Timestamp", + "documentation":"Response element for GetCase that provides the customer provided incident start date.
" + }, + "actualIncidentStartDate":{ + "shape":"Timestamp", + "documentation":"Response element for GetCase that provides the actual incident start date as identified by data analysis during the investigation.
" + }, + "impactedAwsRegions":{ + "shape":"ImpactedAwsRegionList", + "documentation":"Response element for GetCase that provides the impacted regions.
" + }, + "threatActorIpAddresses":{ + "shape":"ThreatActorIpList", + "documentation":"Response element for GetCase that provides a list of suspicious IP addresses associated with unauthorized activity.
" + }, + "pendingAction":{ + "shape":"PendingAction", + "documentation":"Response element for GetCase that provides identifies the case is waiting on customer input.
" + }, + "impactedAccounts":{ + "shape":"ImpactedAccounts", + "documentation":"Response element for GetCase that provides a list of impacted accounts.
" + }, + "watchers":{ + "shape":"Watchers", + "documentation":"Response element for GetCase that provides a list of Watchers added to the case.
" + }, + "createdDate":{ + "shape":"Timestamp", + "documentation":"Response element for GetCase that provides the date the case was created.
" + }, + "lastUpdatedDate":{ + "shape":"Timestamp", + "documentation":"Response element for GetCase that provides the date a case was last modified.
" + }, + "closureCode":{ + "shape":"ClosureCode", + "documentation":"Response element for GetCase that provides the summary code for why a case was closed.
" + }, + "resolverType":{ + "shape":"ResolverType", + "documentation":"Response element for GetCase that provides the current resolver types. Options include self-supported | AWS-supported
." + }, + "impactedServices":{ + "shape":"ImpactedServicesList", + "documentation":"Response element for GetCase that provides a list of impacted services.
" + }, + "caseAttachments":{ + "shape":"CaseAttachmentsList", + "documentation":"Response element for GetCase that provides a list of current case attachments.
" + }, + "closedDate":{ + "shape":"Timestamp", + "documentation":"Response element for GetCase that provides the date a specified case was closed.
" + } + } + }, + "GetMembershipAccountDetailError":{ + "type":"structure", + "required":[ + "accountId", + "error", + "message" + ], + "members":{ + "accountId":{ + "shape":"AWSAccountId", + "documentation":"" + }, + "error":{ + "shape":"String", + "documentation":"" + }, + "message":{ + "shape":"String", + "documentation":"" + } + }, + "documentation":"" + }, + "GetMembershipAccountDetailErrors":{ + "type":"list", + "member":{"shape":"GetMembershipAccountDetailError"}, + "max":100, + "min":0 + }, + "GetMembershipAccountDetailItem":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AWSAccountId", + "documentation":"" + }, + "relationshipStatus":{ + "shape":"MembershipAccountRelationshipStatus", + "documentation":"" + }, + "relationshipType":{ + "shape":"MembershipAccountRelationshipType", + "documentation":"" + } + }, + "documentation":"" + }, + "GetMembershipAccountDetailItems":{ + "type":"list", + "member":{"shape":"GetMembershipAccountDetailItem"}, + "max":100, + "min":0 + }, + "GetMembershipRequest":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"Required element for GetMembership to identify the membership ID to query.
", + "location":"uri", + "locationName":"membershipId" + } + } + }, + "GetMembershipResponse":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"Response element for GetMembership that provides the queried membership ID.
" + }, + "accountId":{ + "shape":"AWSAccountId", + "documentation":"Response element for GetMembership that provides the configured account for managing the membership.
" + }, + "region":{ + "shape":"AwsRegion", + "documentation":"Response element for GetMembership that provides the configured region for managing the membership.
" + }, + "membershipName":{ + "shape":"MembershipName", + "documentation":"Response element for GetMembership that provides the configured membership name.
" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"Response element for GetMembership that provides the membership ARN.
" + }, + "membershipStatus":{ + "shape":"MembershipStatus", + "documentation":"Response element for GetMembership that provides the current membership status.
" + }, + "membershipActivationTimestamp":{ + "shape":"Timestamp", + "documentation":"Response element for GetMembership that provides the configured membership activation timestamp.
" + }, + "membershipDeactivationTimestamp":{ + "shape":"Timestamp", + "documentation":"Response element for GetMembership that provides the configured membership name deactivation timestamp.
" + }, + "customerType":{ + "shape":"CustomerType", + "documentation":"Response element for GetMembership that provides the configured membership type. Options include Standalone | Organizations
.
Response element for GetMembership that provides the number of accounts in the membership.
" + }, + "incidentResponseTeam":{ + "shape":"IncidentResponseTeam", + "documentation":"Response element for GetMembership that provides the configured membership incident response team members.
" + }, + "optInFeatures":{ + "shape":"OptInFeatures", + "documentation":"Response element for GetMembership that provides the if opt-in features have been enabled.
" + } + } + }, + "IPAddress":{ + "type":"string", + "pattern":"(?:(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))|(?:(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4})|(?:(?:[A-F0-9]{1,4}:){6}(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))", + "sensitive":true + }, + "ImpactedAccounts":{ + "type":"list", + "member":{"shape":"AWSAccountId"}, + "max":200, + "min":0 + }, + "ImpactedAwsRegion":{ + "type":"structure", + "required":["region"], + "members":{ + "region":{ + "shape":"AwsRegion", + "documentation":"" + } + }, + "documentation":"" + }, + "ImpactedAwsRegionList":{ + "type":"list", + "member":{"shape":"ImpactedAwsRegion"}, + "max":50, + "min":0 + }, + "ImpactedServicesList":{ + "type":"list", + "member":{"shape":"AwsService"}, + "max":600, + "min":0 + }, + "IncidentResponder":{ + "type":"structure", + "required":[ + "name", + "jobTitle", + "email" + ], + "members":{ + "name":{ + "shape":"IncidentResponderName", + "documentation":"" + }, + "jobTitle":{ + "shape":"JobTitle", + "documentation":"" + }, + "email":{ + "shape":"EmailAddress", + "documentation":"" + } + }, + "documentation":"" + }, + "IncidentResponderName":{ + "type":"string", + "max":50, + "min":3, + "sensitive":true + }, + "IncidentResponseTeam":{ + "type":"list", + "member":{"shape":"IncidentResponder"}, + "max":10, + "min":2 + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"Element providing advice to clients on when the call can be safely retried.
", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "InvalidTokenException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"", + "error":{ + "httpStatusCode":423, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "JobTitle":{ + "type":"string", + "max":50, + "min":1, + "sensitive":true + }, + "ListCaseEditsRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "nextToken":{ + "shape":"ListCaseEditsRequestNextTokenString", + "documentation":"Optional element for a customer provided token.
" + }, + "maxResults":{ + "shape":"ListCaseEditsRequestMaxResultsInteger", + "documentation":"Optional element to identify how many results to obtain. There is a maximum value of 25.
" + }, + "caseId":{ + "shape":"CaseId", + "documentation":"Required element used with ListCaseEdits to identify the case to query.
", + "location":"uri", + "locationName":"caseId" + } + } + }, + "ListCaseEditsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListCaseEditsRequestNextTokenString":{ + "type":"string", + "max":2000, + "min":0 + }, + "ListCaseEditsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"Optional element.
" + }, + "items":{ + "shape":"CaseEditItems", + "documentation":"Response element for ListCaseEdits that includes the action, eventtimestamp, message, and principal for the response.
" + }, + "total":{ + "shape":"Integer", + "documentation":"Response element for ListCaseEdits that identifies the total number of edits.
" + } + } + }, + "ListCasesItem":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"" + }, + "lastUpdatedDate":{ + "shape":"Timestamp", + "documentation":"" + }, + "title":{ + "shape":"CaseTitle", + "documentation":"" + }, + "caseArn":{ + "shape":"CaseArn", + "documentation":"" + }, + "engagementType":{ + "shape":"EngagementType", + "documentation":"" + }, + "caseStatus":{ + "shape":"CaseStatus", + "documentation":"" + }, + "createdDate":{ + "shape":"Timestamp", + "documentation":"" + }, + "closedDate":{ + "shape":"Timestamp", + "documentation":"" + }, + "resolverType":{ + "shape":"ResolverType", + "documentation":"" + }, + "pendingAction":{ + "shape":"PendingAction", + "documentation":"" + } + }, + "documentation":"" + }, + "ListCasesItems":{ + "type":"list", + "member":{"shape":"ListCasesItem"} + }, + "ListCasesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"ListCasesRequestNextTokenString", + "documentation":"Optional element.
" + }, + "maxResults":{ + "shape":"ListCasesRequestMaxResultsInteger", + "documentation":"Optional element for ListCases to limit the number of responses.
" + } + } + }, + "ListCasesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListCasesRequestNextTokenString":{ + "type":"string", + "max":2000, + "min":0 + }, + "ListCasesResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"Optional element.
" + }, + "items":{ + "shape":"ListCasesItems", + "documentation":"Response element for ListCases that includes caseARN, caseID, caseStatus, closedDate, createdDate, engagementType, lastUpdatedDate, pendingAction, resolverType, and title for each response.
" + }, + "total":{ + "shape":"Long", + "documentation":"Response element for ListCases providing the total number of responses.
" + } + } + }, + "ListCommentsItem":{ + "type":"structure", + "required":["commentId"], + "members":{ + "commentId":{ + "shape":"CommentId", + "documentation":"" + }, + "createdDate":{ + "shape":"Timestamp", + "documentation":"" + }, + "lastUpdatedDate":{ + "shape":"Timestamp", + "documentation":"" + }, + "creator":{ + "shape":"PrincipalId", + "documentation":"" + }, + "lastUpdatedBy":{ + "shape":"PrincipalId", + "documentation":"" + }, + "body":{ + "shape":"CommentBody", + "documentation":"" + } + }, + "documentation":"" + }, + "ListCommentsItems":{ + "type":"list", + "member":{"shape":"ListCommentsItem"} + }, + "ListCommentsRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "nextToken":{ + "shape":"ListCommentsRequestNextTokenString", + "documentation":"Optional element.
" + }, + "maxResults":{ + "shape":"ListCommentsRequestMaxResultsInteger", + "documentation":"Optional element for ListComments to limit the number of responses.
" + }, + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for ListComments to designate the case to query.
", + "location":"uri", + "locationName":"caseId" + } + } + }, + "ListCommentsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListCommentsRequestNextTokenString":{ + "type":"string", + "max":2000, + "min":0 + }, + "ListCommentsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"Optional request elements.
" + }, + "items":{ + "shape":"ListCommentsItems", + "documentation":"Response element for ListComments providing the body, commentID, createDate, creator, lastUpdatedBy and lastUpdatedDate for each response.
" + }, + "total":{ + "shape":"Integer", + "documentation":"Response element for ListComments identifying the number of responses.
" + } + } + }, + "ListMembershipItem":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"" + }, + "accountId":{ + "shape":"AWSAccountId", + "documentation":"" + }, + "region":{ + "shape":"AwsRegion", + "documentation":"" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"" + }, + "membershipStatus":{ + "shape":"MembershipStatus", + "documentation":"" + } + }, + "documentation":"" + }, + "ListMembershipItems":{ + "type":"list", + "member":{"shape":"ListMembershipItem"} + }, + "ListMembershipsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"ListMembershipsRequestNextTokenString", + "documentation":"Optional element.
" + }, + "maxResults":{ + "shape":"ListMembershipsRequestMaxResultsInteger", + "documentation":"Request element for ListMemberships to limit the number of responses.
" + } + } + }, + "ListMembershipsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListMembershipsRequestNextTokenString":{ + "type":"string", + "max":2000, + "min":0 + }, + "ListMembershipsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"Optional element.
" + }, + "items":{ + "shape":"ListMembershipItems", + "documentation":"Request element for ListMemberships including the accountID, membershipARN, membershipID, membershipStatus, and region for each response.
" + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"Required element for ListTagsForResource to provide the ARN to identify a specific resource.
", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"Response element for ListTagsForResource providing content for each configured tag.
" + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "MembershipAccountRelationshipStatus":{ + "type":"string", + "enum":[ + "Associated", + "Disassociated" + ] + }, + "MembershipAccountRelationshipType":{ + "type":"string", + "enum":["Organization"] + }, + "MembershipArn":{ + "type":"string", + "max":80, + "min":12, + "pattern":"arn:aws:security-ir:\\w+?-\\w+?-\\d+:[0-9]{12}:membership/m-[a-z0-9]{10,32}" + }, + "MembershipId":{ + "type":"string", + "max":34, + "min":12, + "pattern":"m-[a-z0-9]{10,32}" + }, + "MembershipName":{ + "type":"string", + "max":50, + "min":3, + "sensitive":true + }, + "MembershipStatus":{ + "type":"string", + "enum":[ + "Active", + "Cancelled", + "Terminated" + ] + }, + "OptInFeature":{ + "type":"structure", + "required":[ + "featureName", + "isEnabled" + ], + "members":{ + "featureName":{ + "shape":"OptInFeatureName", + "documentation":"" + }, + "isEnabled":{ + "shape":"Boolean", + "documentation":"" + } + }, + "documentation":"" + }, + "OptInFeatureName":{ + "type":"string", + "enum":["Triage"] + }, + "OptInFeatures":{ + "type":"list", + "member":{"shape":"OptInFeature"}, + "max":2, + "min":1 + }, + "PendingAction":{ + "type":"string", + "enum":[ + "Customer", + "None" + ] + }, + "PersonName":{ + "type":"string", + "max":50, + "min":1, + "sensitive":true + }, + "PrincipalId":{ + "type":"string", + "pattern":".*(^internal:midway:([a-z]{3,8}|svc-mw-[0-9]{12}[a-zA-Z0-9-]{5,20})$)|(^external:aws:\\d{12}$).*" + }, + "ResolverType":{ + "type":"string", + "enum":[ + "AWS", + "Self" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SecurityIncidentResponseNotActiveException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SelfManagedCaseStatus":{ + "type":"string", + "enum":[ + "Submitted", + "Detection and Analysis", + "Containment, Eradication and Recovery", + "Post-incident Activities" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"Element that provides the ID of the resource affected.
" + }, + "resourceType":{ + "shape":"String", + "documentation":"Element that provides the type of the resource affected.
" + }, + "serviceCode":{ + "shape":"String", + "documentation":"Element that provides the originating service who made the call.
" + }, + "quotaCode":{ + "shape":"String", + "documentation":"Element that provides the quota that was exceeded.
" + } + }, + "documentation":"", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"Required element for TagResource to identify the ARN for the resource to add a tag to.
", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"Required element for ListTagsForResource to provide the content for a tag.
" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThreatActorIp":{ + "type":"structure", + "required":["ipAddress"], + "members":{ + "ipAddress":{ + "shape":"IPAddress", + "documentation":"" + }, + "userAgent":{ + "shape":"UserAgent", + "documentation":"" + } + }, + "documentation":"" + }, + "ThreatActorIpList":{ + "type":"list", + "member":{"shape":"ThreatActorIp"}, + "max":200, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"Element providing the service code of the originating service.
" + }, + "quotaCode":{ + "shape":"String", + "documentation":"Element providing the quota of the originating service.
" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"Element providing advice to clients on when the call can be safely retried.
", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"Required element for UnTagResource to identify the ARN for the resource to remove a tag from.
", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"Required element for UnTagResource to identify tag to remove.
", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateCaseCommentRequest":{ + "type":"structure", + "required":[ + "caseId", + "commentId", + "body" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for UpdateCaseComment to identify the case ID containing the comment to be updated.
", + "location":"uri", + "locationName":"caseId" + }, + "commentId":{ + "shape":"CommentId", + "documentation":"Required element for UpdateCaseComment to identify the case ID to be updated.
", + "location":"uri", + "locationName":"commentId" + }, + "body":{ + "shape":"CommentBody", + "documentation":"Required element for UpdateCaseComment to identify the content for the comment to be updated.
" + } + } + }, + "UpdateCaseCommentResponse":{ + "type":"structure", + "required":["commentId"], + "members":{ + "commentId":{ + "shape":"CommentId", + "documentation":"Response element for UpdateCaseComment providing the updated comment ID.
" + }, + "body":{ + "shape":"CommentBody", + "documentation":"Response element for UpdateCaseComment providing the updated comment content.
" + } + } + }, + "UpdateCaseRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for UpdateCase to identify the case ID for updates.
", + "location":"uri", + "locationName":"caseId" + }, + "title":{ + "shape":"CaseTitle", + "documentation":"Optional element for UpdateCase to provide content for the title field.
" + }, + "description":{ + "shape":"CaseDescription", + "documentation":"Optional element for UpdateCase to provide content for the description field.
" + }, + "reportedIncidentStartDate":{ + "shape":"Timestamp", + "documentation":"Optional element for UpdateCase to provide content for the customer reported incident start date field.
" + }, + "actualIncidentStartDate":{ + "shape":"Timestamp", + "documentation":"Optional element for UpdateCase to provide content for the incident start date field.
" + }, + "engagementType":{ + "shape":"EngagementType", + "documentation":"Optional element for UpdateCase to provide content for the engagement type field. Available engagement types include Security Incident | Investigation
." + }, + "watchersToAdd":{ + "shape":"Watchers", + "documentation":"Optional element for UpdateCase to provide content to add additional watchers to a case.
" + }, + "watchersToDelete":{ + "shape":"Watchers", + "documentation":"Optional element for UpdateCase to provide content to remove existing watchers from a case.
" + }, + "threatActorIpAddressesToAdd":{ + "shape":"ThreatActorIpList", + "documentation":"Optional element for UpdateCase to provide content to add additional suspicious IP addresses related to a case.
" + }, + "threatActorIpAddressesToDelete":{ + "shape":"ThreatActorIpList", + "documentation":"Optional element for UpdateCase to provide content to remove suspicious IP addresses from a case.
" + }, + "impactedServicesToAdd":{ + "shape":"ImpactedServicesList", + "documentation":"Optional element for UpdateCase to provide content to add services impacted.
" + }, + "impactedServicesToDelete":{ + "shape":"ImpactedServicesList", + "documentation":"Optional element for UpdateCase to provide content to remove services impacted.
" + }, + "impactedAwsRegionsToAdd":{ + "shape":"ImpactedAwsRegionList", + "documentation":"Optional element for UpdateCase to provide content to add regions impacted.
" + }, + "impactedAwsRegionsToDelete":{ + "shape":"ImpactedAwsRegionList", + "documentation":"Optional element for UpdateCase to provide content to remove regions impacted.
" + }, + "impactedAccountsToAdd":{ + "shape":"ImpactedAccounts", + "documentation":"Optional element for UpdateCase to provide content to add accounts impacted.
" + }, + "impactedAccountsToDelete":{ + "shape":"ImpactedAccounts", + "documentation":"Optional element for UpdateCase to provide content to add accounts impacted.
" + } + } + }, + "UpdateCaseResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCaseStatusRequest":{ + "type":"structure", + "required":[ + "caseId", + "caseStatus" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for UpdateCaseStatus to identify the case to update.
", + "location":"uri", + "locationName":"caseId" + }, + "caseStatus":{ + "shape":"SelfManagedCaseStatus", + "documentation":"Required element for UpdateCaseStatus to identify the status for a case. Options include Submitted | Detection and Analysis | Containment, Eradication and Recovery | Post-incident Activities
." + } + } + }, + "UpdateCaseStatusResponse":{ + "type":"structure", + "members":{ + "caseStatus":{ + "shape":"SelfManagedCaseStatus", + "documentation":"Response element for UpdateCaseStatus showing the newly configured status.
" + } + } + }, + "UpdateMembershipRequest":{ + "type":"structure", + "required":["membershipId"], + "members":{ + "membershipId":{ + "shape":"MembershipId", + "documentation":"Required element for UpdateMembership to identify the membership to update.
", + "location":"uri", + "locationName":"membershipId" + }, + "membershipName":{ + "shape":"MembershipName", + "documentation":"Optional element for UpdateMembership to update the membership name.
" + }, + "incidentResponseTeam":{ + "shape":"IncidentResponseTeam", + "documentation":"Optional element for UpdateMembership to update the membership name.
" + }, + "optInFeatures":{ + "shape":"OptInFeatures", + "documentation":"Optional element for UpdateMembership to enable or disable opt-in features for the service.
" + } + } + }, + "UpdateMembershipResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateResolverTypeRequest":{ + "type":"structure", + "required":[ + "caseId", + "resolverType" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Required element for UpdateResolverType to identify the case to update.
", + "location":"uri", + "locationName":"caseId" + }, + "resolverType":{ + "shape":"ResolverType", + "documentation":"Required element for UpdateResolverType to identify the new resolver.
" + } + } + }, + "UpdateResolverTypeResponse":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"Response element for UpdateResolver identifying the case ID being updated.
" + }, + "caseStatus":{ + "shape":"CaseStatus", + "documentation":"Response element for UpdateResolver identifying the current status of the case.
" + }, + "resolverType":{ + "shape":"ResolverType", + "documentation":"Response element for UpdateResolver identifying the current resolver of the case.
" + } + } + }, + "Url":{ + "type":"string", + "pattern":"https?://(?:www.)?[a-zA-Z0-9@:._+~#=-]{2,256}\\.[a-z]{2,6}\\b(?:[-a-zA-Z0-9@:%_+.~#?&/=]{0,2048})", + "sensitive":true + }, + "UserAgent":{ + "type":"string", + "max":500, + "min":0 + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"Element that provides the reason the request failed validation.
" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"Element that provides the list of field(s) that caused the error, if applicable.
" + } + }, + "documentation":"", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"" + }, + "message":{ + "shape":"String", + "documentation":"" + } + }, + "documentation":"" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "UNKNOWN_OPERATION", + "CANNOT_PARSE", + "FIELD_VALIDATION_FAILED", + "OTHER" + ] + }, + "Watcher":{ + "type":"structure", + "required":["email"], + "members":{ + "email":{ + "shape":"EmailAddress", + "documentation":"" + }, + "name":{ + "shape":"PersonName", + "documentation":"" + }, + "jobTitle":{ + "shape":"JobTitle", + "documentation":"" + } + }, + "documentation":"" + }, + "Watchers":{ + "type":"list", + "member":{"shape":"Watcher"}, + "max":30, + "min":0 + } + }, + "documentation":"This guide provides documents the action and response elements for customer use of the service.
" +} diff --git a/services/securityir/src/main/resources/codegen-resources/waiters-2.json b/services/securityir/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 00000000000..13f60ee66be --- /dev/null +++ b/services/securityir/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} From f410ee04e275ee503c6b28afb3f43f01127f0f2e Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:07 +0000 Subject: [PATCH 28/35] Amazon FSx Update: FSx API changes to support the public launch of the Amazon FSx Intelligent Tiering for OpenZFS storage class. --- .../feature-AmazonFSx-bef794e.json | 6 ++ .../codegen-resources/service-2.json | 56 ++++++++++++++++--- 2 files changed, 55 insertions(+), 7 deletions(-) create mode 100644 .changes/next-release/feature-AmazonFSx-bef794e.json diff --git a/.changes/next-release/feature-AmazonFSx-bef794e.json b/.changes/next-release/feature-AmazonFSx-bef794e.json new file mode 100644 index 00000000000..c74ac64fbad --- /dev/null +++ b/.changes/next-release/feature-AmazonFSx-bef794e.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "FSx API changes to support the public launch of the Amazon FSx Intelligent Tiering for OpenZFS storage class." +} diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index 9236632067f..fc694300260 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -740,7 +740,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
For FSx for Windows File Server file systems, you can update the following properties:
AuditLogConfiguration
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
SelfManagedActiveDirectoryConfiguration
StorageCapacity
StorageType
ThroughputCapacity
DiskIopsConfiguration
WeeklyMaintenanceStartTime
For FSx for Lustre file systems, you can update the following properties:
AutoImportPolicy
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DataCompressionType
LogConfiguration
LustreRootSquashConfiguration
MetadataConfiguration
PerUnitStorageThroughput
StorageCapacity
WeeklyMaintenanceStartTime
For FSx for ONTAP file systems, you can update the following properties:
AddRouteTableIds
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DiskIopsConfiguration
FsxAdminPassword
HAPairs
RemoveRouteTableIds
StorageCapacity
ThroughputCapacity
ThroughputCapacityPerHAPair
WeeklyMaintenanceStartTime
For FSx for OpenZFS file systems, you can update the following properties:
AddRouteTableIds
AutomaticBackupRetentionDays
CopyTagsToBackups
CopyTagsToVolumes
DailyAutomaticBackupStartTime
DiskIopsConfiguration
RemoveRouteTableIds
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
For FSx for Windows File Server file systems, you can update the following properties:
AuditLogConfiguration
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
SelfManagedActiveDirectoryConfiguration
StorageCapacity
StorageType
ThroughputCapacity
DiskIopsConfiguration
WeeklyMaintenanceStartTime
For FSx for Lustre file systems, you can update the following properties:
AutoImportPolicy
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DataCompressionType
LogConfiguration
LustreRootSquashConfiguration
MetadataConfiguration
PerUnitStorageThroughput
StorageCapacity
WeeklyMaintenanceStartTime
For FSx for ONTAP file systems, you can update the following properties:
AddRouteTableIds
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DiskIopsConfiguration
FsxAdminPassword
HAPairs
RemoveRouteTableIds
StorageCapacity
ThroughputCapacity
ThroughputCapacityPerHAPair
WeeklyMaintenanceStartTime
For FSx for OpenZFS file systems, you can update the following properties:
AddRouteTableIds
AutomaticBackupRetentionDays
CopyTagsToBackups
CopyTagsToVolumes
DailyAutomaticBackupStartTime
DiskIopsConfiguration
ReadCacheConfiguration
RemoveRouteTableIds
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
Specifies the resource type that's backed up.
" }, - "Volume":{"shape":"Volume"} + "Volume":{"shape":"Volume"}, + "SizeInBytes":{ + "shape":"SizeInBytes", + "documentation":"The size of the backup in bytes. This represents the amount of data that the file system would contain if you restore this backup.
" + } }, "documentation":"A backup of an Amazon FSx for Windows File Server, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx for OpenZFS file system.
" }, @@ -1870,6 +1874,10 @@ "RouteTableIds":{ "shape":"RouteTableIds", "documentation":"(Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.
" + }, + "ReadCacheConfiguration":{ + "shape":"OpenZFSReadCacheConfiguration", + "documentation":"Specifies the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
" } }, "documentation":"The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.
" @@ -1878,7 +1886,6 @@ "type":"structure", "required":[ "FileSystemType", - "StorageCapacity", "SubnetIds" ], "members":{ @@ -1897,7 +1904,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"Sets the storage type for the file system that you're creating. Valid values are SSD
and HDD
.
Set to SSD
to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.
Set to HDD
to use hard disk drive storage. HDD is supported on SINGLE_AZ_2
and MULTI_AZ_1
Windows file system deployment types, and on PERSISTENT_1
Lustre file system deployment types.
Default value is SSD
. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide.
Sets the storage class for the file system that you're creating. Valid values are SSD
, HDD
, and INTELLIGENT_TIERING
.
Set to SSD
to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.
Set to HDD
to use hard disk drive storage. HDD is supported on SINGLE_AZ_2
and MULTI_AZ_1
Windows file system deployment types, and on PERSISTENT_1
Lustre file system deployment types.
Set to INTELLIGENT_TIERING
to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type.
Default value is SSD
. For more information, see Storage type options in the FSx for Windows File Server User Guide, Multiple storage options in the FSx for Lustre User Guide, and Working with Intelligent-Tiering in the Amazon FSx for OpenZFS User Guide.
Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see ZFS Record size in the Amazon FSx for OpenZFS User Guide.
" + "documentation":"Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). For file systems using the Intelligent-Tiering storage class, valid values are 128, 256, 512, 1024, 2048, or 4096 KiB, with a default of 2048 KiB. For all other file systems, valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB, with a default of 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see ZFS Record size in the Amazon FSx for OpenZFS User Guide.
" }, "DataCompressionType":{ "shape":"OpenZFSDataCompressionType", @@ -3948,7 +3955,7 @@ }, "IntegerRecordSizeKiB":{ "type":"integer", - "max":1024, + "max":4096, "min":4 }, "InternalServerError":{ @@ -4642,6 +4649,10 @@ "EndpointIpAddress":{ "shape":"IpAddress", "documentation":"The IP address of the endpoint that is used to access data or to manage the file system.
" + }, + "ReadCacheConfiguration":{ + "shape":"OpenZFSReadCacheConfiguration", + "documentation":" Required when StorageType
is set to INTELLIGENT_TIERING
. Specifies the optional provisioned SSD read cache.
The configuration for the Amazon FSx for OpenZFS file system.
" @@ -4692,6 +4703,28 @@ "GROUP" ] }, + "OpenZFSReadCacheConfiguration":{ + "type":"structure", + "members":{ + "SizingMode":{ + "shape":"OpenZFSReadCacheSizingMode", + "documentation":"Specifies how the provisioned SSD read cache is sized, as follows:
Set to NO_CACHE
if you do not want to use an SSD read cache with your Intelligent-Tiering file system.
Set to USER_PROVISIONED
to specify the exact size of your SSD read cache.
Set to PROPORTIONAL_TO_THROUGHPUT_CAPACITY
to have your SSD read cache automatically sized based on your throughput capacity.
Required if SizingMode
is set to USER_PROVISIONED
. Specifies the size of the file system's SSD read cache, in gibibytes (GiB).
The configuration for the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
" + }, + "OpenZFSReadCacheSizingMode":{ + "type":"string", + "enum":[ + "NO_CACHE", + "USER_PROVISIONED", + "PROPORTIONAL_TO_THROUGHPUT_CAPACITY" + ] + }, "OpenZFSUserAndGroupQuotas":{ "type":"list", "member":{"shape":"OpenZFSUserOrGroupQuota"}, @@ -5172,6 +5205,10 @@ "documentation":"An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.
", "exception":true }, + "SizeInBytes":{ + "type":"long", + "min":0 + }, "SnaplockConfiguration":{ "type":"structure", "members":{ @@ -5398,7 +5435,8 @@ "documentation":"Specifies the file system's storage type.
", "enum":[ "SSD", - "HDD" + "HDD", + "INTELLIGENT_TIERING" ] }, "StorageVirtualMachine":{ @@ -5943,6 +5981,10 @@ "RemoveRouteTableIds":{ "shape":"RouteTableIds", "documentation":"(Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for OpenZFS file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.
" + }, + "ReadCacheConfiguration":{ + "shape":"OpenZFSReadCacheConfiguration", + "documentation":"The configuration for the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
" } }, "documentation":"The configuration updates for an Amazon FSx for OpenZFS file system.
" From 772b6938da657f0176ec9a2f51abe958f695effd Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:23 +0000 Subject: [PATCH 29/35] Amazon EC2 Container Service Update: This release adds support for Container Insights with Enhanced Observability for Amazon ECS. --- ...ure-AmazonEC2ContainerService-889d675.json | 6 +++++ .../codegen-resources/service-2.json | 24 +++++++++---------- 2 files changed, 18 insertions(+), 12 deletions(-) create mode 100644 .changes/next-release/feature-AmazonEC2ContainerService-889d675.json diff --git a/.changes/next-release/feature-AmazonEC2ContainerService-889d675.json b/.changes/next-release/feature-AmazonEC2ContainerService-889d675.json new file mode 100644 index 00000000000..de5346ab27c --- /dev/null +++ b/.changes/next-release/feature-AmazonEC2ContainerService-889d675.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release adds support for Container Insights with Enhanced Observability for Amazon ECS." +} diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index 955d7e80c18..4a7000b9d2d 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -135,7 +135,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"Deletes the specified capacity provider.
The FARGATE
and FARGATE_SPOT
capacity providers are reserved and can't be deleted. You can disassociate them from a cluster using either PutCapacityProviderProviders or by deleting the cluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity provider strategy from all services. The UpdateService API can be used to remove a capacity provider from a service's capacity provider strategy. When updating a service, the forceNewDeployment
option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity provider are transitioned to use the capacity from the remaining capacity providers. Only capacity providers that aren't associated with a cluster can be deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Deletes the specified capacity provider.
The FARGATE
and FARGATE_SPOT
capacity providers are reserved and can't be deleted. You can disassociate them from a cluster using either PutClusterCapacityProviders or by deleting the cluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity provider strategy from all services. The UpdateService API can be used to remove a capacity provider from a service's capacity provider strategy. When updating a service, the forceNewDeployment
option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity provider are transitioned to use the capacity from the remaining capacity providers. Only capacity providers that aren't associated with a cluster can be deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Describes one or more of your clusters.
" + "documentation":"Describes one or more of your clusters.
For CLI examples, see describe-clusters.rst on GitHub.
" }, "DescribeContainerInstances":{ "name":"DescribeContainerInstances", @@ -1197,7 +1197,7 @@ }, "assignPublicIp":{ "shape":"AssignPublicIp", - "documentation":"Whether the task's elastic network interface receives a public IP address. The default value is DISABLED
.
Whether the task's elastic network interface receives a public IP address. The default value is ENABLED
.
An object representing the networking details for a task or service. For example awsVpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}
.
The value to set for the cluster setting. The supported values are enabled
and disabled
.
If you set name
to containerInsights
and value
to enabled
, CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the containerInsights
account setting is turned on. If a cluster value is specified, it will override the containerInsights
value set with PutAccountSetting or PutAccountSettingDefault.
The value to set for the cluster setting. The supported values are enhanced
, enabled
, and disabled
.
To use Container Insights with enhanced observability, set the containerInsights
account setting to enhanced
.
To use Container Insights, set the containerInsights
account setting to enabled
.
If a cluster value is specified, it will override the containerInsights
value set with PutAccountSetting or PutAccountSettingDefault.
The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.
" + "documentation":"The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights with enhanced observability or CloudWatch Container Insights for a cluster.
Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays these critical performance data in curated dashboards removing the heavy lifting in observability set-up.
For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.
" }, "ClusterSettingName":{ "type":"string", @@ -2989,7 +2989,7 @@ "members":{ "cluster":{ "shape":"String", - "documentation":"The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe. If you do not specify a cluster, the default cluster is assumed. This parameter is required if the task or tasks you are describing were launched in any cluster other than the default cluster.
" + "documentation":"The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe. If you do not specify a cluster, the default cluster is assumed. This parameter is required. If you do not specify a value, the default
cluster is used.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 20
GiB and the maximum supported value is 200
GiB.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21
GiB and the maximum supported value is 200
GiB.
The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Using data volumes in tasks in the Amazon ECS Developer Guide;.
For tasks using the Fargate launch type, the task requires the following platforms:
Linux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The cluster that hosts the service. This can either be the cluster name or ARN. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performanceIf you don't specify a cluster, deault
is used.
The cluster that hosts the service. This can either be the cluster name or ARN. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performanceIf you don't specify a cluster, default
is used.
The resource name for which to modify the account setting.
The following are the valid values for the account setting name.
serviceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
taskLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
containerInstanceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
awsvpcTrunking
- When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking
is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.
containerInsights
- When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights
is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.
dualStackIPv6
- When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc
network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.
fargateFIPSMode
- If you specify fargateFIPSMode
, Fargate FIPS 140 compliance is affected.
fargateTaskRetirementWaitPeriod
- When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod
to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
tagResourceAuthorization
- Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster
. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource
action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
guardDutyActivate
- The guardDutyActivate
parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
The resource name for which to modify the account setting.
The following are the valid values for the account setting name.
serviceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
taskLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
containerInstanceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
awsvpcTrunking
- When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking
is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.
containerInsights
- Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup (a minimal SDK sketch follows this list).
To use Container Insights with enhanced observability, set the containerInsights
account setting to enhanced
.
To use Container Insights, set the containerInsights
account setting to enabled
.
For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.
dualStackIPv6
- When turned on, and when you use a VPC in dual-stack mode, your tasks that use the awsvpc
network mode can have an IPv6 address assigned. For more information about using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information about using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.
fargateFIPSMode
- If you specify fargateFIPSMode
, Fargate FIPS 140 compliance is affected.
fargateTaskRetirementWaitPeriod
- When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod
to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
tagResourceAuthorization
- Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster
. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource
action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
guardDutyActivate
- The guardDutyActivate
parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
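For reference, a minimal sketch of opting a principal in to Container Insights with enhanced observability, using the AWS SDK for Java 2.x ECS client that this repository ships, might look like the following. The "enhanced" and "enabled" values come from the description above; the class name and printed output are illustrative assumptions.

import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.PutAccountSettingRequest;
import software.amazon.awssdk.services.ecs.model.PutAccountSettingResponse;
import software.amazon.awssdk.services.ecs.model.SettingName;

public class EnableEnhancedContainerInsights {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            // Opt the calling principal in to Container Insights with enhanced observability.
            PutAccountSettingResponse response = ecs.putAccountSetting(
                PutAccountSettingRequest.builder()
                    .name(SettingName.CONTAINER_INSIGHTS)
                    .value("enhanced")   // use "enabled" for standard Container Insights
                    .build());
            System.out.println("containerInsights is now: " + response.setting().value());
        }
    }
}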
The account setting value for the specified principal ARN. Accepted values are enabled
, disabled
, on
, and off
.
When you specify fargateTaskRetirementWaitPeriod
for the name
, the following are the valid values:
0
- Amazon Web Services sends the notification, and immediately retires the affected tasks.
7
- Amazon Web Services sends the notification, and waits 7 calendar days to retire the tasks.
14
- Amazon Web Services sends the notification, and waits 14 calendar days to retire the tasks.
The account setting value for the specified principal ARN. Accepted values are enabled
, disabled
, on
, enhanced
, and off
.
When you specify fargateTaskRetirementWaitPeriod
for the name
, the following are the valid values:
0
- Amazon Web Services sends the notification, and immediately retires the affected tasks.
7
- Amazon Web Services sends the notification, and waits 7 calendar days to retire the tasks.
14
- Amazon Web Services sends the notification, and waits 14 calendar days to retire the tasks.
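A minimal sketch of configuring this wait period as the account default with the AWS SDK for Java 2.x ECS client is shown below; it assumes the SettingName enum exposes a FARGATE_TASK_RETIREMENT_WAIT_PERIOD constant, and the chosen value of "7" is only one of the accepted values listed above.

import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.PutAccountSettingDefaultRequest;
import software.amazon.awssdk.services.ecs.model.SettingName;

public class SetFargateRetirementWaitPeriod {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            // Valid values per the documentation above: "0", "7", or "14" (calendar days).
            ecs.putAccountSettingDefault(PutAccountSettingDefaultRequest.builder()
                .name(SettingName.FARGATE_TASK_RETIREMENT_WAIT_PERIOD)
                .value("7")
                .build());
        }
    }
}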
The Amazon ECS account setting name to modify.
The following are the valid values for the account setting name.
serviceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
taskLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
containerInstanceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
awsvpcTrunking
- When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking
is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.
containerInsights
- When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights
is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.
dualStackIPv6
- When turned on, and when you use a VPC in dual-stack mode, your tasks that use the awsvpc
network mode can have an IPv6 address assigned. For more information about using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information about using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.
fargateTaskRetirementWaitPeriod
- When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod
to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
tagResourceAuthorization
- Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster
. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource
action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
guardDutyActivate
- The guardDutyActivate
parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
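Because guardDutyActivate is read-only, the natural SDK interaction is to read it rather than set it. A minimal sketch with the AWS SDK for Java 2.x ECS client follows; it assumes the SettingName enum exposes a GUARD_DUTY_ACTIVATE constant, and the use of effectiveSettings(true) is simply one way to include account-level defaults in the result.

import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.ListAccountSettingsRequest;
import software.amazon.awssdk.services.ecs.model.SettingName;

public class CheckGuardDutyActivate {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            ecs.listAccountSettings(ListAccountSettingsRequest.builder()
                    .name(SettingName.GUARD_DUTY_ACTIVATE)
                    .effectiveSettings(true)   // also report defaults set at the account level
                    .build())
               .settings()
               .forEach(s -> System.out.println(s.name() + " = " + s.value()));
        }
    }
}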
The Amazon ECS account setting name to modify.
The following are the valid values for the account setting name.
serviceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
taskLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
containerInstanceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
awsvpcTrunking
- When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking
is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.
containerInsights
- Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.
To use Container Insights with enhanced observability, set the containerInsights
account setting to enhanced
.
To use Container Insights, set the containerInsights
account setting to enabled
.
For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.
dualStackIPv6
- When turned on, and when you use a VPC in dual-stack mode, your tasks that use the awsvpc
network mode can have an IPv6 address assigned. For more information about using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information about using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.
fargateTaskRetirementWaitPeriod
- When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod
to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
tagResourceAuthorization
- Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster
. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource
action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
guardDutyActivate
- The guardDutyActivate
parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
The account setting value for the specified principal ARN. Accepted values are enabled
, disabled
, on
, and off
.
When you specify fargateTaskRetirementWaitPeriod
for the name
, the following are the valid values:
0
- Amazon Web Services sends the notification, and immediately retires the affected tasks.
7
- Amazon Web Services sends the notification, and waits 7 calendar days to retire the tasks.
14
- Amazon Web Services sends the notification, and waits 14 calendar days to retire the tasks.
The account setting value for the specified principal ARN. Accepted values are enabled
, disabled
, enhanced
, on
, and off
.
When you specify fargateTaskRetirementWaitPeriod
for the name
, the following are the valid values:
0
- Amazon Web Services sends the notification, and immediately retires the affected tasks.
7
- Amazon Web Services sends the notification, and waits 7 calendar days to retire the tasks.
14
- Amazon Web Services sends the notification, and waits 14 calendar days to retire the tasks.
Gets details and status of a phone number that’s claimed to your Amazon Connect instance or traffic distribution group.
If the number is claimed to a traffic distribution group, and you are calling in the Amazon Web Services Region where the traffic distribution group was created, you can use either a phone number ARN or UUID value for the PhoneNumberId
URI request parameter. However, if the number is claimed to a traffic distribution group and you are calling this API in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException
.
Gets details and status of a phone number that’s claimed to your Amazon Connect instance or traffic distribution group.
If the number is claimed to a traffic distribution group, and you are calling in the Amazon Web Services Region where the traffic distribution group was created, you can use either a phone number ARN or UUID value for the PhoneNumberId
URI request parameter. However, if the number is claimed to a traffic distribution group and you are calling this API in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you receive a ResourceNotFoundException
.
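A minimal sketch of this call with the AWS SDK for Java 2.x Connect client might look like the following. The phone number ARN is a placeholder; as described above, a full ARN (rather than a UUID) is required when the number is claimed to a traffic distribution group and you call from the alternate Region.

import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.DescribePhoneNumberRequest;
import software.amazon.awssdk.services.connect.model.DescribePhoneNumberResponse;

public class DescribePhoneNumberExample {
    public static void main(String[] args) {
        String phoneNumberId = "arn:aws:connect:us-east-1:123456789012:phone-number/EXAMPLE-ID"; // placeholder
        try (ConnectClient connect = ConnectClient.create()) {
            DescribePhoneNumberResponse response = connect.describePhoneNumber(
                DescribePhoneNumberRequest.builder()
                    .phoneNumberId(phoneNumberId)
                    .build());
            System.out.println(response.claimedPhoneNumberSummary().phoneNumberStatus());
        }
    }
}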
Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance was created.
Call the DescribePhoneNumber API to verify the status of a previous ImportPhoneNumber
operation.
If you plan to claim or import numbers and then release numbers frequently, contact us for a service quota exception. Otherwise, you might be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired.
By default you can claim or import and then release up to 200% of your maximum number of active phone numbers. If you claim or import and then release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming or importing any more numbers until 180 days past the oldest number released has expired.
For example, if you already have 99 claimed or imported numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services Support ticket.
" + "documentation":"Imports a claimed phone number from an external service, such as Amazon Web Services End User Messaging, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance was created.
Call the DescribePhoneNumber API to verify the status of a previous ImportPhoneNumber
operation.
If you plan to claim or import numbers and then release numbers frequently, contact us for a service quota exception. Otherwise, you might be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired.
By default you can claim or import and then release up to 200% of your maximum number of active phone numbers. If you claim or import and then release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming or importing any more numbers until 180 days past the oldest number released has expired.
For example, if you already have 99 claimed or imported numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services Support ticket.
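A minimal sketch of importing a number and then verifying the result with DescribePhoneNumber, using the AWS SDK for Java 2.x Connect client, might look like the following. The instance ID, source phone number ARN, and description are placeholder assumptions.

import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.DescribePhoneNumberRequest;
import software.amazon.awssdk.services.connect.model.ImportPhoneNumberRequest;
import software.amazon.awssdk.services.connect.model.ImportPhoneNumberResponse;
import software.amazon.awssdk.services.connect.model.PhoneNumberStatus;

public class ImportPhoneNumberExample {
    public static void main(String[] args) {
        try (ConnectClient connect = ConnectClient.create()) {
            ImportPhoneNumberResponse imported = connect.importPhoneNumber(
                ImportPhoneNumberRequest.builder()
                    .instanceId("11111111-2222-3333-4444-555555555555")                                    // placeholder
                    .sourcePhoneNumberArn("arn:aws:sms-voice:us-east-1:123456789012:phone-number/EXAMPLE") // placeholder
                    .phoneNumberDescription("Imported support line")
                    .build());

            // Check the outcome of the import with DescribePhoneNumber, as recommended above.
            PhoneNumberStatus status = connect.describePhoneNumber(DescribePhoneNumberRequest.builder()
                    .phoneNumberId(imported.phoneNumberId())
                    .build())
                .claimedPhoneNumberSummary()
                .phoneNumberStatus();
            System.out.println("Import status: " + status.status());
        }
    }
}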
" }, "ListAgentStatuses":{ "name":"ListAgentStatuses", @@ -3444,7 +3444,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Processes chat integration events from Amazon Web Services or external integrations to Amazon Connect. A chat integration event includes:
SourceId, DestinationId, and Subtype: a set of identifiers, uniquely representing a chat
ChatEvent: details of the chat action to perform such as sending a message, event, or disconnecting from a chat
When a chat integration event is sent with chat identifiers that do not map to an active chat contact, a new chat contact is also created before the chat action is handled.
Access to this API is currently restricted to Amazon Pinpoint for supporting SMS integration.
" + "documentation":"Processes chat integration events from Amazon Web Services or external integrations to Amazon Connect. A chat integration event includes:
SourceId, DestinationId, and Subtype: a set of identifiers, uniquely representing a chat
ChatEvent: details of the chat action to perform such as sending a message, event, or disconnecting from a chat
When a chat integration event is sent with chat identifiers that do not map to an active chat contact, a new chat contact is also created before the chat action is handled.
Access to this API is currently restricted to Amazon Web Services End User Messaging for supporting SMS integration.
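A minimal sketch of sending a chat integration event for an inbound SMS message, using the AWS SDK for Java 2.x Connect client, might look like the following. The source and destination identifiers and the "connect:SMS" subtype are illustrative assumptions that must match how the SMS integration is configured.

import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.ChatEvent;
import software.amazon.awssdk.services.connect.model.ChatEventType;
import software.amazon.awssdk.services.connect.model.SendChatIntegrationEventRequest;

public class SendChatIntegrationEventExample {
    public static void main(String[] args) {
        try (ConnectClient connect = ConnectClient.create()) {
            connect.sendChatIntegrationEvent(SendChatIntegrationEventRequest.builder()
                .sourceId("+15555550100")                                                        // customer phone number (placeholder)
                .destinationId("arn:aws:sms-voice:us-east-1:123456789012:phone-number/EXAMPLE")  // placeholder
                .subtype("connect:SMS")
                .event(ChatEvent.builder()
                    .type(ChatEventType.MESSAGE)
                    .contentType("text/plain")
                    .content("Hello from SMS")
                    .build())
                .build());
        }
    }
}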
" }, "SendOutboundEmail":{ "name":"SendOutboundEmail", @@ -3588,7 +3588,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Initiates a new outbound SMS contact to a customer. Response of this API provides the ContactId of the outbound SMS contact created.
SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN
as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER
as Type. ContactFlowId initiates the flow to manage the new SMS contact created.
This API can be used to initiate outbound SMS contacts for an agent or it can also deflect an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action.
For more information about using SMS in Amazon Connect, see the following topics in the Amazon Connect Administrator Guide:
" + "documentation":"Initiates a new outbound SMS contact to a customer. Response of this API provides the ContactId
of the outbound SMS contact created.
SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN
as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER
as Type. ContactFlowId initiates the flow to manage the new SMS contact created.
This API can be used to initiate outbound SMS contacts for an agent, or it can also deflect an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action.
For more information about using SMS in Amazon Connect, see the following topics in the Amazon Connect Administrator Guide:
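A minimal sketch of this call with the AWS SDK for Java 2.x Connect client is shown below. The instance ID, flow ID, phone numbers, and the connect:Subtype segment attribute value are assumptions made for illustration; the endpoint types follow the CONNECT_PHONENUMBER_ARN and TELEPHONE_NUMBER constraints described above.

import java.util.Map;
import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.Endpoint;
import software.amazon.awssdk.services.connect.model.EndpointType;
import software.amazon.awssdk.services.connect.model.SegmentAttributeValue;
import software.amazon.awssdk.services.connect.model.StartOutboundChatContactRequest;

public class StartOutboundChatContactExample {
    public static void main(String[] args) {
        try (ConnectClient connect = ConnectClient.create()) {
            String contactId = connect.startOutboundChatContact(StartOutboundChatContactRequest.builder()
                .instanceId("11111111-2222-3333-4444-555555555555")      // placeholder
                .contactFlowId("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")   // placeholder
                .sourceEndpoint(Endpoint.builder()
                    .type(EndpointType.CONNECT_PHONENUMBER_ARN)
                    .address("arn:aws:connect:us-east-1:123456789012:instance/EXAMPLE/phone-number/EXAMPLE")
                    .build())
                .destinationEndpoint(Endpoint.builder()
                    .type(EndpointType.TELEPHONE_NUMBER)
                    .address("+15555550100")
                    .build())
                .segmentAttributes(Map.of("connect:Subtype",
                    SegmentAttributeValue.builder().valueString("connect:SMS").build()))
                .build())
                .contactId();
            System.out.println("Created outbound SMS contact: " + contactId);
        }
    }
}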
" }, "StartOutboundEmailContact":{ "name":"StartOutboundEmailContact", @@ -3895,7 +3895,9 @@ {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} ], "documentation":"This API is in preview release for Amazon Connect and is subject to change.
Adds or updates user-defined contact information associated with the specified contact. At least one field to be updated must be present in the request.
You can add or update user-defined contact information for both ongoing and completed contacts.
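A minimal sketch of adding user-defined information to a contact with the AWS SDK for Java 2.x Connect client might look like the following. The instance ID, contact ID, name, description, and reference are placeholder assumptions; at least one of the updatable fields must be present, as noted above.

import java.util.Map;
import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.Reference;
import software.amazon.awssdk.services.connect.model.ReferenceType;
import software.amazon.awssdk.services.connect.model.UpdateContactRequest;

public class UpdateContactExample {
    public static void main(String[] args) {
        try (ConnectClient connect = ConnectClient.create()) {
            connect.updateContact(UpdateContactRequest.builder()
                .instanceId("11111111-2222-3333-4444-555555555555")     // placeholder
                .contactId("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")      // placeholder
                .name("Billing question")
                .description("Customer asked about a duplicate charge")
                .references(Map.of("CaseUrl", Reference.builder()
                    .type(ReferenceType.URL)
                    .value("https://example.com/cases/1234")
                    .build()))
                .build());
        }
    }
}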
The identifier of the resource.
" + "documentation":"The identifier of the resource.
Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER
Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER
A list of resource identifiers to retrieve flow associations.
" + "documentation":"A list of resource identifiers to retrieve flow associations.
Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER
Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER
The claimed phone number ARN that was previously imported from the external service, such as Amazon Pinpoint. If it is from Amazon Pinpoint, it looks like the ARN of the phone number that was imported from Amazon Pinpoint.
" + "documentation":"The claimed phone number ARN that was previously imported from the external service, such as Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number that was imported from Amazon Web Services End User Messaging.
" } }, "documentation":"Information about a phone number that has been claimed to your Amazon Connect instance or traffic distribution group.
" @@ -7206,6 +7208,14 @@ "error":{"httpStatusCode":410}, "exception":true }, + "ContactRecordingType":{ + "type":"string", + "enum":[ + "AGENT", + "IVR", + "SCREEN" + ] + }, "ContactReferences":{ "type":"map", "key":{"shape":"ReferenceKey"}, @@ -7938,7 +7948,7 @@ }, "IntegrationArn":{ "shape":"ARN", - "documentation":"The Amazon Resource Name (ARN) of the integration.
When integrating with Amazon Pinpoint, the Amazon Connect and Amazon Pinpoint instances must be in the same account.
The Amazon Resource Name (ARN) of the integration.
When integrating with Amazon Web Services End User Messaging, the Amazon Connect and Amazon Web Services End User Messaging instances must be in the same account.
The identifier of the resource.
", + "documentation":"The identifier of the resource.
Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER
Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER
The identifier of the resource.
", + "documentation":"The identifier of the resource.
Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER
Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER
The filters to apply to returned metrics. You can filter on the following resources:
Agents
Campaigns
Channels
Feature
Queues
Routing profiles
Routing step expression
User hierarchy groups
At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.
For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least one filter requirement.
To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide.
Note the following limits:
Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT
| AGENT_HIERARCHY_LEVEL_ONE
| AGENT_HIERARCHY_LEVEL_TWO
| AGENT_HIERARCHY_LEVEL_THREE
| AGENT_HIERARCHY_LEVEL_FOUR
| AGENT_HIERARCHY_LEVEL_FIVE
| ANSWERING_MACHINE_DETECTION_STATUS
| CAMPAIGN
| CAMPAIGN_DELIVERY_EVENT_TYPE
|CASE_TEMPLATE_ARN
| CASE_STATUS
| CHANNEL
| contact/segmentAttributes/connect:Subtype
| DISCONNECT_REASON
| FEATURE
| FLOW_TYPE
| FLOWS_NEXT_RESOURCE_ID
| FLOWS_NEXT_RESOURCE_QUEUE_ID
| FLOWS_OUTCOME_TYPE
| FLOWS_RESOURCE_ID
| INITIATION_METHOD
| RESOURCE_PUBLISHED_TIMESTAMP
| ROUTING_PROFILE
| ROUTING_STEP_EXPRESSION
| QUEUE
| Q_CONNECT_ENABLED
|
Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue
for the CHANNEL filter key. They do not count toward the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
contact_lens_conversational_analytics
is a valid filterValue for the FEATURE
filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
connect:Chat
, connect:SMS
, connect:Telephony
, and connect:WebRTC
are valid filterValue
examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter
key.
ROUTING_STEP_EXPRESSION
is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.
Q_CONNECT_ENABLED
. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED
filter key.
TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.
FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow
This filter is available only for contact record-driven metrics.
Campaign ARNs are valid filterValues
for the CAMPAIGN
filter key.
The filters to apply to returned metrics. You can filter on the following resources:
Agents
Campaigns
Channels
Feature
Queues
Routing profiles
Routing step expression
User hierarchy groups
At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.
For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least one filter requirement.
To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide.
Note the following limits:
Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT
| AGENT_HIERARCHY_LEVEL_ONE
| AGENT_HIERARCHY_LEVEL_TWO
| AGENT_HIERARCHY_LEVEL_THREE
| AGENT_HIERARCHY_LEVEL_FOUR
| AGENT_HIERARCHY_LEVEL_FIVE
| ANSWERING_MACHINE_DETECTION_STATUS
| BOT_ID
| BOT_ALIAS
| BOT_VERSION
| BOT_LOCALE
| BOT_INTENT_NAME
| CAMPAIGN
| CAMPAIGN_DELIVERY_EVENT_TYPE
|CASE_TEMPLATE_ARN
| CASE_STATUS
| CHANNEL
| contact/segmentAttributes/connect:Subtype
| DISCONNECT_REASON
| FEATURE
| FLOW_ACTION_ID
| FLOW_TYPE
| FLOWS_MODULE_RESOURCE_ID
| FLOWS_NEXT_RESOURCE_ID
| FLOWS_NEXT_RESOURCE_QUEUE_ID
| FLOWS_OUTCOME_TYPE
| FLOWS_RESOURCE_ID
| INITIATION_METHOD
| INVOKING_RESOURCE_PUBLISHED_TIMESTAMP
| INVOKING_RESOURCE_TYPE
| PARENT_FLOWS_RESOURCE_ID
| RESOURCE_PUBLISHED_TIMESTAMP
| ROUTING_PROFILE
| ROUTING_STEP_EXPRESSION
| QUEUE
| Q_CONNECT_ENABLED
|
Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue
for the CHANNEL filter key. They do not count toward the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
contact_lens_conversational_analytics
is a valid filterValue for the FEATURE
filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
connect:Chat
, connect:SMS
, connect:Telephony
, and connect:WebRTC
are valid filterValue
examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter
key.
ROUTING_STEP_EXPRESSION
is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.
Q_CONNECT_ENABLED
. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED
filter key.
TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.
FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow
This filter is available only for contact record-driven metrics.
Campaign ARNs are valid filterValues
for the CAMPAIGN
filter key.
The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.
If no grouping is specified, a summary of all metrics is returned.
Valid grouping keys: AGENT
| AGENT_HIERARCHY_LEVEL_ONE
| AGENT_HIERARCHY_LEVEL_TWO
| AGENT_HIERARCHY_LEVEL_THREE
| AGENT_HIERARCHY_LEVEL_FOUR
| AGENT_HIERARCHY_LEVEL_FIVE
| ANSWERING_MACHINE_DETECTION_STATUS
| CAMPAIGN
| CAMPAIGN_DELIVERY_EVENT_TYPE
| CASE_TEMPLATE_ARN
| CASE_STATUS
| CHANNEL
| contact/segmentAttributes/connect:Subtype
| DISCONNECT_REASON
| FLOWS_RESOURCE_ID
| FLOWS_MODULE_RESOURCE_ID
| FLOW_TYPE
| FLOWS_OUTCOME_TYPE
| INITIATION_METHOD
| Q_CONNECT_ENABLED
| QUEUE
| RESOURCE_PUBLISHED_TIMESTAMP
| ROUTING_PROFILE
| ROUTING_STEP_EXPRESSION
The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.
If no grouping is specified, a summary of all metrics is returned.
Valid grouping keys: AGENT
| AGENT_HIERARCHY_LEVEL_ONE
| AGENT_HIERARCHY_LEVEL_TWO
| AGENT_HIERARCHY_LEVEL_THREE
| AGENT_HIERARCHY_LEVEL_FOUR
| AGENT_HIERARCHY_LEVEL_FIVE
| ANSWERING_MACHINE_DETECTION_STATUS
| BOT_ID
| BOT_ALIAS
| BOT_VERSION
| BOT_LOCALE
| BOT_INTENT_NAME
| CAMPAIGN
| CAMPAIGN_DELIVERY_EVENT_TYPE
| CASE_TEMPLATE_ARN
| CASE_STATUS
| CHANNEL
| contact/segmentAttributes/connect:Subtype
| DISCONNECT_REASON
| FLOWS_RESOURCE_ID
| FLOWS_MODULE_RESOURCE_ID
| FLOW_ACTION_ID
| FLOW_TYPE
| FLOWS_OUTCOME_TYPE
| INITIATION_METHOD
| INVOKING_RESOURCE_PUBLISHED_TIMESTAMP
| INVOKING_RESOURCE_TYPE
| PARENT_FLOWS_RESOURCE_ID
| Q_CONNECT_ENABLED
| QUEUE
| RESOURCE_PUBLISHED_TIMESTAMP
| ROUTING_PROFILE
| ROUTING_STEP_EXPRESSION
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Abandonment rate
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Adherent time
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent answer rate
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Non-adherent time
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent non-response
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Data for this metric is available starting from October 1, 2023 0:00:00 GMT.
Unit: Percentage
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Occupancy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Adherence
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Scheduled time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average queue abandon time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Average active time
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average after contact work time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
. For now, this metric only supports the following as INITIATION_METHOD
: INBOUND
| OUTBOUND
| CALLBACK
| API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Average agent API connecting time
The Negate
key in metric-level filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Average agent pause time
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Average contacts per case
Unit: Seconds
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Average case resolution time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average contact duration
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average conversation duration
This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.
Unit: Count
Valid groupings and filters: Agent, Campaign, Queue, Routing Profile
UI name: Average dials per minute
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Average flow time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent greeting time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
UI name: Average handle time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average customer hold time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average holds
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent interaction time
Feature is a valid filter but not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent interruptions
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent interruption time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average non-talk time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average queue answer time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average resolution time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average talk time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent talk time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average customer talk time
This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.
Unit: Seconds
Valid groupings and filters: Campaign
This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.
Unit: Count
Valid groupings and filters: Agent, Campaign
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter GT
(for Greater than).
UI name: Campaign contacts abandoned after X
This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.
Unit: Percent
Valid groupings and filters: Agent, Campaign
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter GT
(for Greater than).
This metric is available only for outbound campaigns using the email delivery mode.
Unit: Count
Valid metric filter key: CAMPAIGN_INTERACTION_EVENT_TYPE
Valid groupings and filters: Campaign
UI name: Campaign interactions
This metric is available only for outbound campaigns.
Unit: Count
Valid groupings and filters: Campaign, Channel, contact/segmentAttributes/connect:Subtype
UI name: Campaign send attempts
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases created
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts created
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD
, DISCONNECT_REASON
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
UI name: API contacts handled
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts hold disconnect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts hold agent disconnect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts hold customer disconnect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts put on hold
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts transferred out external
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts transferred out internal
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts queued
Unit: Count
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
UI name: Contacts queued (enqueue timestamp)
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you can use LT
(for \"Less than\") or LTE
(for \"Less than equal\").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you can use LT
(for \"Less than\") or LTE
(for \"Less than equal\").
UI name: Contacts resolved in X
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out by agent
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out queue
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Current cases
This metric is available only for outbound campaigns.
Unit: Count
Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS
, CAMPAIGN_DELIVERY_EVENT_TYPE
, DISCONNECT_REASON
Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Campaign Delivery EventType, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile
UI name: Delivery attempts
Campaign Delivery EventType filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.
This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.
Unit: Percent
Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS
, CAMPAIGN_DELIVERY_EVENT_TYPE
, DISCONNECT_REASON
Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile
UI name: Delivery attempt disposition rate
Campaign Delivery Event Type filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows outcome
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows started
This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.
Unit: Count
Valid groupings and filters: Agent, Campaign
UI name: Human answered
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Maximum flow time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Maximum queued time
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Minimum flow time
Unit: Percent
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases resolved on first contact
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.
Unit: Percent
Valid metric filter key: FLOWS_OUTCOME_TYPE
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows outcome percentage.
The FLOWS_OUTCOME_TYPE
is not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Non-talk time percent
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Talk time percent
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Agent talk time percent
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Customer talk time percent
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases reopened
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases resolved
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you can use LT
(for \"Less than\") or LTE
(for \"Less than equal\").
UI name: Service level X (a minimal SDK sketch of this metric follows the metrics list)
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: After contact work time
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
. This metric only supports the following filter keys as INITIATION_METHOD
: INBOUND
| OUTBOUND
| CALLBACK
| API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent API connecting time
The Negate
key in metric-level filters is not applicable for this metric.
Unit: Count
Metric filter:
Valid values: API
| Incoming
| Outbound
| Transfer
| Callback
| Queue_Transfer
| Disconnect
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
UI name: Contact abandoned
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you can use LT
(for \"Less than\") or LTE
(for \"Less than equal\").
UI name: Contacts abandoned in X seconds
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you can use LT
(for \"Less than\") or LTE
(for \"Less than equal\").
UI name: Contacts answered in X seconds
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contact flow time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Agent on contact time
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contact disconnected
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Error status time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contact handle time
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Customer hold time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Agent idle time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Agent interaction and hold time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent interaction time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Non-Productive Time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Online time
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Callback attempts
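As referenced in the Service level X description above, a minimal sketch of building that metric with a 60-second threshold using the AWS SDK for Java 2.x Connect model classes might look like the following; the 60-second value is only one example from the 1 to 604800 range, and the constant name is illustrative.

import software.amazon.awssdk.services.connect.model.MetricV2;
import software.amazon.awssdk.services.connect.model.ThresholdV2;

public class ServiceLevelMetric {
    // Service level within 60 seconds: Comparison must be LT or LTE, and ThresholdValue
    // can be any whole number of seconds from 1 to 604800.
    static final MetricV2 SERVICE_LEVEL_60_SECONDS = MetricV2.builder()
        .name("SERVICE_LEVEL")
        .threshold(ThresholdV2.builder()
            .comparison("LT")
            .thresholdValue(60.0)
            .build())
        .build();
}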
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Abandonment rate
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Adherent time
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent answer rate
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Non-adherent time
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent non-response
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Data for this metric is available starting from October 1, 2023 0:00:00 GMT.
Unit: Percentage
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Occupancy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Adherence
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Scheduled time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average queue abandon time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Average active time
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average after contact work time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
. For now, this metric only supports the following as INITIATION_METHOD
: INBOUND
| OUTBOUND
| CALLBACK
| API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Average agent API connecting time
The Negate
key in metric-level filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Average agent pause time
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID
UI name: Average bot conversation time
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID
UI name: Average bot conversation turns
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Average contacts per case
Unit: Seconds
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Average case resolution time
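To make the "Required filter key" notes above concrete, the sketch below builds the filter and metric objects for a case metric with the AWS SDK for Java 2.x builders generated from this model. This is a minimal, hypothetical example: the metric name AVG_CASE_RESOLUTION_TIME and the case template ARN are assumptions, not values taken from this patch.

```java
import java.util.List;
import software.amazon.awssdk.services.connect.model.FilterV2;
import software.amazon.awssdk.services.connect.model.MetricV2;

public class CaseMetricFilterSketch {
    public static void main(String[] args) {
        // Case metrics require a CASE_TEMPLATE_ARN filter; CASE_STATUS is an optional grouping/filter.
        FilterV2 requiredCaseTemplateFilter = FilterV2.builder()
                .filterKey("CASE_TEMPLATE_ARN")
                .filterValues("arn:aws:cases:us-west-2:123456789012:domain/example/case-template/example") // assumed ARN
                .build();

        // AVG_CASE_RESOLUTION_TIME is assumed here to be the API name for "Average case resolution time".
        MetricV2 avgCaseResolutionTime = MetricV2.builder()
                .name("AVG_CASE_RESOLUTION_TIME")
                .build();

        List<FilterV2> filters = List.of(requiredCaseTemplateFilter);
        List<MetricV2> metrics = List.of(avgCaseResolutionTime);
        // These lists would be passed to a GetMetricDataV2 request together with the time range
        // (see the full request sketch after the metrics list below).
    }
}
```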
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average contact duration
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average conversation duration
This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.
Unit: Count
Valid groupings and filters: Agent, Campaign, Queue, Routing Profile
UI name: Average dials per minute
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Average flow time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent greeting time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
UI name: Average handle time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average customer hold time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average holds
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent interaction time
Feature is a valid filter but not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent interruptions
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent interruption time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average non-talk time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average queue answer time
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average resolution time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average talk time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average agent talk time
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Average customer talk time
This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.
Unit: Seconds
Valid groupings and filters: Campaign
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID
UI name: Bot conversations
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Bot intent name, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID
UI name: Bot intents completed
This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.
Unit: Count
Valid groupings and filters: Agent, Campaign
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).
UI name: Campaign contacts abandoned after X
This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.
Unit: Percent
Valid groupings and filters: Agent, Campaign
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).
This metric is available only for outbound campaigns using the email delivery mode.
Unit: Count
Valid metric filter key: CAMPAIGN_INTERACTION_EVENT_TYPE
Valid groupings and filters: Campaign
UI name: Campaign interactions
This metric is available only for outbound campaigns.
Unit: Count
Valid groupings and filters: Campaign, Channel, contact/segmentAttributes/connect:Subtype
UI name: Campaign send attempts
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases created
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts created
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
UI name: API contacts handled
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts hold disconnect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts hold agent disconnect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts hold customer disconnect
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts put on hold
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts transferred out external
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts transferred out internal
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts queued
Unit: Count
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
UI name: Contacts queued (enqueue timestamp)
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").
UI name: Contacts resolved in X
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out by agent
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out queue
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Current cases
This metric is available only for outbound campaigns.
Unit: Count
Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, CAMPAIGN_DELIVERY_EVENT_TYPE, DISCONNECT_REASON
Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Campaign Delivery EventType, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile
UI name: Delivery attempts
Campaign Delivery EventType filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.
This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.
Unit: Percent
Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS
, CAMPAIGN_DELIVERY_EVENT_TYPE
, DISCONNECT_REASON
Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile
UI name: Delivery attempt disposition rate
Campaign Delivery Event Type filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows outcome
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows started
This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.
Unit: Count
Valid groupings and filters: Agent, Campaign
UI name: Human answered
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Maximum flow time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Maximum queued time
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Minimum flow time
Unit: Percent
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID
UI name: Percent bot conversations outcome
Unit: Percent
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Bot intent name, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID
UI name: Percent bot intents outcome
Unit: Percent
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases resolved on first contact
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.
Unit: Percent
Valid metric filter key: FLOWS_OUTCOME_TYPE
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows outcome percentage.
The FLOWS_OUTCOME_TYPE is not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Non-talk time percent
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Talk time percent
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Agent talk time percent
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Customer talk time percent
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases reopened
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases resolved
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").
UI name: Service level X
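To illustrate the threshold notes above, here is a minimal sketch of a SERVICE_LEVEL metric with a ThresholdValue and Comparison, using the AWS SDK for Java 2.x builders generated from this model. SERVICE_LEVEL is named in the text above; the 60-second threshold is an arbitrary assumption.

```java
import software.amazon.awssdk.services.connect.model.MetricV2;
import software.amazon.awssdk.services.connect.model.ThresholdV2;

public class ServiceLevelThresholdSketch {
    public static void main(String[] args) {
        // "Service level X": percent of contacts answered within X seconds.
        // Comparison must be LT or LTE; ThresholdValue is a whole number of seconds from 1 to 604800.
        ThresholdV2 answeredWithin60Seconds = ThresholdV2.builder()
                .comparison("LT")
                .thresholdValue(60.0)
                .build();

        MetricV2 serviceLevel60 = MetricV2.builder()
                .name("SERVICE_LEVEL")
                .threshold(answeredWithin60Seconds)
                .build();

        // Up to 20 SERVICE_LEVEL metrics (for example, with different thresholds) can go in one request.
        System.out.println(serviceLevel60);
    }
}
```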
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: After contact work time
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent API connecting time
The Negate key in metric-level filters is not applicable for this metric.
Unit: Count
Metric filter: Valid values: API | Incoming | Outbound | Transfer | Callback | Queue_Transfer | Disconnect
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
UI name: Contact abandoned
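The metric-level filter described above can be expressed with a MetricFilterV2. The sketch below, against the AWS SDK for Java 2.x, keeps only abandoned contacts whose initiation method was Incoming or Callback. Both the metric name CONTACTS_ABANDONED and the INITIATION_METHOD filter key are assumptions inferred from the UI name and value list above, not taken verbatim from this patch.

```java
import software.amazon.awssdk.services.connect.model.MetricFilterV2;
import software.amazon.awssdk.services.connect.model.MetricV2;

public class MetricLevelFilterSketch {
    public static void main(String[] args) {
        // Metric-level filter: restrict the metric to a subset of the valid values listed above.
        MetricFilterV2 abandonedByInitiationMethod = MetricFilterV2.builder()
                .metricFilterKey("INITIATION_METHOD")
                .metricFilterValues("Incoming", "Callback")
                .negate(false) // for some metrics (noted above) the Negate key is not applicable
                .build();

        // CONTACTS_ABANDONED is assumed to be the API name for the "Contact abandoned" metric.
        MetricV2 contactsAbandoned = MetricV2.builder()
                .name("CONTACTS_ABANDONED")
                .metricFilters(abandonedByInitiationMethod)
                .build();

        System.out.println(contactsAbandoned);
    }
}
```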
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").
UI name: Contacts abandoned in X seconds
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").
UI name: Contacts answered in X seconds
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contact flow time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Agent on contact time
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contact disconnected
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Error status time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contact handle time
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Customer hold time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Agent idle time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Agent interaction and hold time
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent interaction time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Non-Productive Time
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Online time
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Callback attempts
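Putting the pieces above together, here is a minimal, hypothetical sketch of a GetMetricDataV2 call with the AWS SDK for Java 2.x: one metric, a queue filter, and a grouping by channel. The instance ARN, queue ID, and the AVG_HANDLE_TIME metric name are assumptions for illustration only; the Region and credentials come from the default provider chain.

```java
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.FilterV2;
import software.amazon.awssdk.services.connect.model.GetMetricDataV2Request;
import software.amazon.awssdk.services.connect.model.GetMetricDataV2Response;
import software.amazon.awssdk.services.connect.model.MetricV2;

public class GetMetricDataV2Sketch {
    public static void main(String[] args) {
        try (ConnectClient connect = ConnectClient.create()) {
            Instant end = Instant.now().truncatedTo(ChronoUnit.HOURS);
            Instant start = end.minus(24, ChronoUnit.HOURS);

            GetMetricDataV2Request request = GetMetricDataV2Request.builder()
                    .resourceArn("arn:aws:connect:us-west-2:123456789012:instance/EXAMPLE-INSTANCE-ID") // assumed ARN
                    .startTime(start)
                    .endTime(end)
                    // Filter to a single queue; QUEUE is one of the "Valid groupings and filters" listed above.
                    .filters(FilterV2.builder()
                            .filterKey("QUEUE")
                            .filterValues("EXAMPLE-QUEUE-ID") // assumed queue ID
                            .build())
                    // Group the results by channel (for example VOICE, CHAT, TASK).
                    .groupings("CHANNEL")
                    // AVG_HANDLE_TIME is assumed to be the API name for "Average handle time".
                    .metrics(MetricV2.builder().name("AVG_HANDLE_TIME").build())
                    .build();

            GetMetricDataV2Response response = connect.getMetricDataV2(request);
            response.metricResults().forEach(result ->
                    System.out.println(result.dimensions() + " -> " + result.collections()));
        }
    }
}
```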
The claimed phone number ARN being imported from the external service, such as Amazon Pinpoint. If it is from Amazon Pinpoint, it looks like the ARN of the phone number to import from Amazon Pinpoint.
" + "documentation":"The claimed phone number ARN being imported from the external service, such as Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number to import from Amazon Web Services End User Messaging.
" }, "PhoneNumberDescription":{ "shape":"PhoneNumberDescription", @@ -14067,7 +14079,9 @@ "CASES_DOMAIN", "APPLICATION", "FILE_SCANNER", - "SES_IDENTITY" + "SES_IDENTITY", + "ANALYTICS_CONNECTOR", + "CALL_TRANSFER_CONNECTOR" ] }, "InternalServiceException":{ @@ -14188,6 +14202,10 @@ "type":"list", "member":{"shape":"IpCidr"} }, + "IvrRecordingTrack":{ + "type":"string", + "enum":["ALL"] + }, "JoinToken":{ "type":"string", "sensitive":true @@ -14942,9 +14960,11 @@ "ListFlowAssociationResourceType":{ "type":"string", "enum":[ + "WHATSAPP_MESSAGING_PHONE_NUMBER", "VOICE_PHONE_NUMBER", "INBOUND_EMAIL", - "OUTBOUND_EMAIL" + "OUTBOUND_EMAIL", + "ANALYTICS_CONNECTOR" ] }, "ListFlowAssociationsRequest":{ @@ -15356,7 +15376,7 @@ }, "SourcePhoneNumberArn":{ "shape":"ARN", - "documentation":"The claimed phone number ARN that was previously imported from the external service, such as Amazon Pinpoint. If it is from Amazon Pinpoint, it looks like the ARN of the phone number that was imported from Amazon Pinpoint.
" + "documentation":"The claimed phone number ARN that was previously imported from the external service, such as Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number that was imported from Amazon Web Services End User Messaging.
" } }, "documentation":"Information about phone numbers that have been claimed to your Amazon Connect instance or traffic distribution group.
" @@ -16642,7 +16662,7 @@ }, "MetricFilterValues":{ "shape":"MetricFilterValueList", - "documentation":"The values to use for filtering data. Values for metric-level filters can be either a fixed set of values or a customized list, depending on the use case.
For valid values of metric-level filters INITIATION_METHOD
, DISCONNECT_REASON
, and ANSWERING_MACHINE_DETECTION_STATUS
, see ContactTraceRecord in the Amazon Connect Administrator Guide.
For valid values of the metric-level filter FLOWS_OUTCOME_TYPE
, see the description for the Flow outcome metric in the Amazon Connect Administrator Guide.
The values to use for filtering data. Values for metric-level filters can be either a fixed set of values or a customized list, depending on the use case.
For valid values of metric-level filters INITIATION_METHOD
, DISCONNECT_REASON
, and ANSWERING_MACHINE_DETECTION_STATUS
, see ContactTraceRecord in the Amazon Connect Administrator Guide.
For valid values of the metric-level filter FLOWS_OUTCOME_TYPE
, see the description for the Flow outcome metric in the Amazon Connect Administrator Guide.
For valid values of the metric-level filter BOT_CONVERSATION_OUTCOME_TYPE
, see the description for the Bot conversations completed in the Amazon Connect Administrator Guide.
For valid values of the metric-level filter BOT_INTENT_OUTCOME_TYPE
, see the description for the Bot intents completed metric in the Amazon Connect Administrator Guide.
If this contact was queued, this contains information about the queue.
" }, + "QueueInfoInput":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"QueueId", + "documentation":"The identifier of the queue.
" + } + }, + "documentation":"Information about a queue.
" + }, "QueueMaxContacts":{ "type":"integer", "min":0 @@ -19207,6 +19237,10 @@ "InitialContactId":{ "shape":"ContactId", "documentation":"The identifier of the contact. This is the identifier of the contact associated with the first interaction with the contact center.
" + }, + "ContactRecordingType":{ + "shape":"ContactRecordingType", + "documentation":"The type of recording being operated on.
" } } }, @@ -20792,7 +20826,7 @@ }, "Arn":{ "shape":"ARN", - "documentation":"The Amazon Resource Name (ARN) for the secruity profile.
" + "documentation":"The Amazon Resource Name (ARN) for the security profile.
" }, "SecurityProfileName":{ "shape":"SecurityProfileName", @@ -21006,11 +21040,11 @@ }, "DestinationId":{ "shape":"DestinationId", - "documentation":"Chat system identifier, used in part to uniquely identify chat. This is associated with the Amazon Connect instance and flow to be used to start chats. For SMS, this is the phone number destination of inbound SMS messages represented by an Amazon Pinpoint phone number ARN.
" + "documentation":"Chat system identifier, used in part to uniquely identify chat. This is associated with the Amazon Connect instance and flow to be used to start chats. For Server Migration Service, this is the phone number destination of inbound Server Migration Service messages represented by an Amazon Web Services End User Messaging phone number ARN.
" }, "Subtype":{ "shape":"Subtype", - "documentation":"Classification of a channel. This is used in part to uniquely identify chat.
Valid value: [\"connect:sms\"]
Classification of a channel. This is used in part to uniquely identify chat.
Valid value: [\"connect:sms\", connect:\"WhatsApp\"]
The identifier of the contact. This is the identifier of the contact associated with the first interaction with the contact center.
" + }, + "ContactRecordingType":{ + "shape":"ContactRecordingType", + "documentation":"The type of recording being operated on.
" } } }, @@ -22259,6 +22297,10 @@ "InitialContactId":{ "shape":"ContactId", "documentation":"The identifier of the contact. This is the identifier of the contact associated with the first interaction with the contact center.
" + }, + "ContactRecordingType":{ + "shape":"ContactRecordingType", + "documentation":"The type of recording being operated on.
" } } }, @@ -22395,7 +22437,7 @@ "members":{ "key":{ "shape":"TagKey", - "documentation":"The tag key in the tagSet.
" + "documentation":"The tag key in the TagSet.
" }, "value":{ "shape":"TagValue", @@ -23432,6 +23474,22 @@ "SegmentAttributes":{ "shape":"SegmentAttributes", "documentation":"A set of system defined key-value pairs stored on individual contact segments (unique contact ID) using an attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in flows.
Attribute keys can include only alphanumeric, -, and _.
This field can be used to show channel subtype, such as connect:Guide
.
Currently Contact Expiry is the only segment attribute which can be updated by using the UpdateContact API.
" + }, + "QueueInfo":{ + "shape":"QueueInfoInput", + "documentation":"Information about the queue associated with a contact. This parameter can only be updated for external audio contacts. It is used when you integrate third-party systems with Contact Lens for analytics. For more information, see Amazon Connect Contact Lens integration in the Amazon Connect Administrator Guide.
" + }, + "UserInfo":{ + "shape":"UserInfo", + "documentation":"Information about the agent associated with a contact. This parameter can only be updated for external audio contacts. It is used when you integrate third-party systems with Contact Lens for analytics. For more information, see Amazon Connect Contact Lens integration in the Amazon Connect Administrator Guide.
" + }, + "CustomerEndpoint":{ + "shape":"Endpoint", + "documentation":"The endpoint of the customer for which the contact was initiated. For external audio contacts, this is usually the end customer's phone number. This value can only be updated for external audio contacts. For more information, see Amazon Connect Contact Lens integration in the Amazon Connect Administrator Guide.
" + }, + "SystemEndpoint":{ + "shape":"Endpoint", + "documentation":"External system endpoint for the contact was initiated. For external audio contacts, this is the phone number of the external system such as the contact center. This value can only be updated for external audio contacts. For more information, see Amazon Connect Contact Lens integration in the Amazon Connect Administrator Guide.
" } } }, @@ -25710,6 +25768,10 @@ "VoiceRecordingTrack":{ "shape":"VoiceRecordingTrack", "documentation":"Identifies which track is being recorded.
" + }, + "IvrRecordingTrack":{ + "shape":"IvrRecordingTrack", + "documentation":"Identifies which IVR track is being recorded.
" } }, "documentation":"Contains information about the recording configuration settings.
" @@ -25740,5 +25802,5 @@ }, "timestamp":{"type":"timestamp"} }, - "documentation":"Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale.
Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents.
There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.
You can connect programmatically to an Amazon Web Services service by using an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.
" + "documentation":"Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale.
Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents.
There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.
You can use an endpoint to connect programmatically to an Amazon Web Services service. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.
" } From 1c8b2df45ab68e43204aac25f1dbe3fda58d9b05 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:21 +0000 Subject: [PATCH 31/35] Amazon Elastic Kubernetes Service Update: Added support for Auto Mode Clusters, Hybrid Nodes, and specifying computeTypes in the DescribeAddonVersions API. --- ...mazonElasticKubernetesService-d3cdaff.json | 6 + .../codegen-resources/service-2.json | 187 +++++++++++++++++- 2 files changed, 191 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json diff --git a/.changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json b/.changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json new file mode 100644 index 00000000000..c1fb832ade5 --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Added support for Auto Mode Clusters, Hybrid Nodes, and specifying computeTypes in the DescribeAddonVersions API." +} diff --git a/services/eks/src/main/resources/codegen-resources/service-2.json b/services/eks/src/main/resources/codegen-resources/service-2.json index c5b551fc8e3..1a80210d64b 100644 --- a/services/eks/src/main/resources/codegen-resources/service-2.json +++ b/services/eks/src/main/resources/codegen-resources/service-2.json @@ -1286,6 +1286,10 @@ "shape":"StringList", "documentation":"The architectures that the version supports.
" }, + "computeTypes":{ + "shape":"StringList", + "documentation":"Indicates the compute type of the addon version.
" + }, "compatibilities":{ "shape":"Compatibilities", "documentation":"An object representing the compatibilities of a version.
" @@ -1485,6 +1489,16 @@ "error":{"httpStatusCode":400}, "exception":true }, + "BlockStorage":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"BoxedBoolean", + "documentation":"Indicates if the block storage capability is enabled on your EKS Auto Mode cluster. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account.
" + } + }, + "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + }, "Boolean":{"type":"boolean"}, "BoxedBoolean":{ "type":"boolean", @@ -1669,6 +1683,18 @@ "zonalShiftConfig":{ "shape":"ZonalShiftConfigResponse", "documentation":"The configuration for zonal shift for the cluster.
" + }, + "remoteNetworkConfig":{ + "shape":"RemoteNetworkConfigResponse", + "documentation":"The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created.
" + }, + "computeConfig":{ + "shape":"ComputeConfigResponse", + "documentation":"Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the EKS User Guide.
" + }, + "storageConfig":{ + "shape":"StorageConfigResponse", + "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" } }, "documentation":"An object representing an Amazon EKS cluster.
" @@ -1768,6 +1794,42 @@ }, "documentation":"Compatibility information.
" }, + "ComputeConfigRequest":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"BoxedBoolean", + "documentation":"Request to enable or disable the compute capability on your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account.
" + }, + "nodePools":{ + "shape":"StringList", + "documentation":"Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide.
" + }, + "nodeRoleArn":{ + "shape":"String", + "documentation":"The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the EKS User Guide.
" + } + }, + "documentation":"Request to update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode compute capability in the EKS User Guide.
" + }, + "ComputeConfigResponse":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"BoxedBoolean", + "documentation":"Indicates if the compute capability is enabled on your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account.
" + }, + "nodePools":{ + "shape":"StringList", + "documentation":"Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide.
" + }, + "nodeRoleArn":{ + "shape":"String", + "documentation":"The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster.
" + } + }, + "documentation":"Indicates the status of the request to update the compute capability of your EKS Auto Mode cluster.
" + }, "ConnectorConfigProvider":{ "type":"string", "enum":[ @@ -2024,6 +2086,18 @@ "zonalShiftConfig":{ "shape":"ZonalShiftConfigRequest", "documentation":"Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.
Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary.
You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide .
" + }, + "remoteNetworkConfig":{ + "shape":"RemoteNetworkConfigRequest", + "documentation":"The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created.
" + }, + "computeConfig":{ + "shape":"ComputeConfigRequest", + "documentation":"Enable or disable the compute capability of EKS Auto Mode when creating your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account
" + }, + "storageConfig":{ + "shape":"StorageConfigRequest", + "documentation":"Enable or disable the block storage capability of EKS Auto Mode when creating your EKS Auto Mode cluster. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account.
" } } }, @@ -3076,6 +3150,16 @@ "type":"string", "enum":["MONTHS"] }, + "ElasticLoadBalancing":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"BoxedBoolean", + "documentation":"Indicates if the load balancing capability is enabled on your EKS Auto Mode cluster. If the load balancing capability is enabled, EKS Auto Mode will create and delete load balancers in your Amazon Web Services account.
" + } + }, + "documentation":"Indicates the current configuration of the load balancing capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide.
" + }, "EncryptionConfig":{ "type":"structure", "members":{ @@ -3584,6 +3668,10 @@ "ipFamily":{ "shape":"IpFamily", "documentation":"Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, ipv4
is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify ipv6
, the VPC and subnets that you specify for cluster creation must have both IPv4
and IPv6
CIDR blocks assigned to them. You can't specify ipv6
for clusters in China Regions.
You can only specify ipv6
for 1.21
and later clusters that use version 1.10.1
or later of the Amazon VPC CNI add-on. If you specify ipv6
, then ensure that your VPC meets the requirements listed in the considerations listed in Assigning IPv6 addresses to pods and services in the Amazon EKS User Guide. Kubernetes assigns services IPv6
addresses from the unique local address range (fc00::/7)
. You can't specify a custom IPv6
CIDR block. Pod addresses are assigned from the subnet's IPv6
CIDR.
Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide.
" } }, "documentation":"The Kubernetes network configuration for the cluster.
" @@ -3602,6 +3690,10 @@ "ipFamily":{ "shape":"IpFamily", "documentation":"The IP family used to assign Kubernetes Pod
and Service
objects IP addresses. The IP family is always ipv4
, unless you have a 1.21
or later cluster running version 1.10.1
or later of the Amazon VPC CNI plugin for Kubernetes and specified ipv6
when you created the cluster.
Indicates the current configuration of the load balancing capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled.
" } }, "documentation":"The Kubernetes network configuration for the cluster. The response contains a value for serviceIpv6Cidr or serviceIpv4Cidr, but not both.
" @@ -4766,6 +4858,64 @@ }, "documentation":"An object representing the remote access configuration for the managed node group.
" }, + "RemoteNetworkConfigRequest":{ + "type":"structure", + "members":{ + "remoteNodeNetworks":{ + "shape":"RemoteNodeNetworkList", + "documentation":"The list of network CIDRs that can contain hybrid nodes.
" + }, + "remotePodNetworks":{ + "shape":"RemotePodNetworkList", + "documentation":"The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.
" + } + }, + "documentation":"The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created.
" + }, + "RemoteNetworkConfigResponse":{ + "type":"structure", + "members":{ + "remoteNodeNetworks":{ + "shape":"RemoteNodeNetworkList", + "documentation":"The list of network CIDRs that can contain hybrid nodes.
" + }, + "remotePodNetworks":{ + "shape":"RemotePodNetworkList", + "documentation":"The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.
" + } + }, + "documentation":"The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created.
" + }, + "RemoteNodeNetwork":{ + "type":"structure", + "members":{ + "cidrs":{ + "shape":"StringList", + "documentation":"A network CIDR that can contain hybrid nodes.
" + } + }, + "documentation":"A network CIDR that can contain hybrid nodes.
" + }, + "RemoteNodeNetworkList":{ + "type":"list", + "member":{"shape":"RemoteNodeNetwork"}, + "max":1 + }, + "RemotePodNetwork":{ + "type":"structure", + "members":{ + "cidrs":{ + "shape":"StringList", + "documentation":"A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.
" + } + }, + "documentation":"A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.
" + }, + "RemotePodNetworkList":{ + "type":"list", + "member":{"shape":"RemotePodNetwork"}, + "max":1 + }, "ResolveConflicts":{ "type":"string", "enum":[ @@ -4913,6 +5063,26 @@ "exception":true, "fault":true }, + "StorageConfigRequest":{ + "type":"structure", + "members":{ + "blockStorage":{ + "shape":"BlockStorage", + "documentation":"Request to configure EBS Block Storage settings for your EKS Auto Mode cluster.
" + } + }, + "documentation":"Request to update the configuration of the storage capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + }, + "StorageConfigResponse":{ + "type":"structure", + "members":{ + "blockStorage":{ + "shape":"BlockStorage", + "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled.
" + } + }, + "documentation":"Indicates the status of the request to update the block storage capability of your EKS Auto Mode cluster.
" + }, "String":{"type":"string"}, "StringList":{ "type":"list", @@ -5217,6 +5387,15 @@ "zonalShiftConfig":{ "shape":"ZonalShiftConfigRequest", "documentation":"Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.
Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary.
You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide .
" + }, + "computeConfig":{ + "shape":"ComputeConfigRequest", + "documentation":"Update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability.
" + }, + "kubernetesNetworkConfig":{"shape":"KubernetesNetworkConfigRequest"}, + "storageConfig":{ + "shape":"StorageConfigRequest", + "documentation":"Update the configuration of the block storage capability of your EKS Auto Mode cluster. For example, enable the capability.
" } } }, @@ -5448,7 +5627,10 @@ "AuthenticationMode", "PodIdentityAssociations", "UpgradePolicy", - "ZonalShiftConfig" + "ZonalShiftConfig", + "ComputeConfig", + "StorageConfig", + "KubernetesNetworkConfig" ] }, "UpdateParams":{ @@ -5531,7 +5713,8 @@ "VpcConfigUpdate", "AccessConfigUpdate", "UpgradePolicyUpdate", - "ZonalShiftConfigUpdate" + "ZonalShiftConfigUpdate", + "AutoModeUpdate" ] }, "UpgradePolicyRequest":{ From 596a4506a5197ca90683d929a10e5566196c331f Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:08 +0000 Subject: [PATCH 32/35] Amazon Elastic Compute Cloud Update: Adds support for declarative policies that allow you to enforce desired configuration across an AWS organization through configuring account attributes. Adds support for Allowed AMIs that allows you to limit the use of AMIs in AWS accounts. Adds support for connectivity over non-HTTP protocols. --- ...ure-AmazonElasticComputeCloud-c98c8a3.json | 6 + .../codegen-resources/service-2.json | 1547 ++++++++++++++++- 2 files changed, 1523 insertions(+), 30 deletions(-) create mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json b/.changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json new file mode 100644 index 00000000000..b02223c87bd --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Adds support for declarative policies that allow you to enforce desired configuration across an AWS organization through configuring account attributes. Adds support for Allowed AMIs that allows you to limit the use of AMIs in AWS accounts. Adds support for connectivity over non-HTTP protocols." +} diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index b988267798a..9d7e611990b 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -491,6 +491,16 @@ "input":{"shape":"CancelConversionRequest"}, "documentation":"Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.
" }, + "CancelDeclarativePoliciesReport":{ + "name":"CancelDeclarativePoliciesReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelDeclarativePoliciesReportRequest"}, + "output":{"shape":"CancelDeclarativePoliciesReportResult"}, + "documentation":"Cancels the generation of an account status report.
You can only cancel a report while it has the running
status. Reports with other statuses (complete
, cancelled
, or error
) can't be canceled.
For more information, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.
" + }, "CancelExportTask":{ "name":"CancelExportTask", "http":{ @@ -2507,6 +2517,16 @@ "output":{"shape":"DescribeCustomerGatewaysResult"}, "documentation":"Describes one or more of your VPN customer gateways.
For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN User Guide.
" }, + "DescribeDeclarativePoliciesReports":{ + "name":"DescribeDeclarativePoliciesReports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeclarativePoliciesReportsRequest"}, + "output":{"shape":"DescribeDeclarativePoliciesReportsResult"}, + "documentation":"Describes the metadata of an account status report, including the status of the report.
To view the full report, download it from the Amazon S3 bucket where it was saved. Reports are accessible only when they have the complete
status. Reports with other statuses (running
, cancelled
, or error
) are not available in the S3 bucket. For more information about downloading objects from an S3 bucket, see Downloading objects in the Amazon Simple Storage Service User Guide.
For more information, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.
" + }, "DescribeDhcpOptions":{ "name":"DescribeDhcpOptions", "http":{ @@ -2715,7 +2735,7 @@ }, "input":{"shape":"DescribeImagesRequest"}, "output":{"shape":"DescribeImagesResult"}, - "documentation":"Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
The images available to you include public images, private images that you own, and private images owned by other Amazon Web Services accounts for which you have explicit launch permissions.
Recently deregistered images appear in the returned results for a short interval and then return empty results. After all instances that reference a deregistered AMI are terminated, specifying the ID of the image will eventually return an error indicating that the AMI ID cannot be found.
We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.
The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.
Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
The images available to you include public images, private images that you own, and private images owned by other Amazon Web Services accounts for which you have explicit launch permissions.
Recently deregistered images appear in the returned results for a short interval and then return empty results. After all instances that reference a deregistered AMI are terminated, specifying the ID of the image will eventually return an error indicating that the AMI ID cannot be found.
When Allowed AMIs is set to enabled
, only allowed images are returned in the results, with the imageAllowed
field set to true
for each image. In audit-mode
, the imageAllowed
field is set to true
for images that meet the account's Allowed AMIs criteria, and false
for images that don't meet the criteria. For more information, see EnableAllowedImagesSettings.
We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.
The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.
Describes the AMI that was used to launch an instance, even if the AMI is deprecated, deregistered, or made private (no longer public or shared with your account).
If you specify instance IDs, the output includes information for only the specified instances. If you specify filters, the output includes information for only those instances that meet the filter criteria. If you do not specify instance IDs or filters, the output includes information for all instances, which can affect performance.
If you specify an instance ID that is not valid, an instance that doesn't exist, or an instance that you do not own, an error (InvalidInstanceID.NotFound
) is returned.
Recently terminated instances might appear in the returned results. This interval is usually less than one hour.
In the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected Availability Zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs that are in an unaffected Availability Zone, the call works normally.
The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.
Describes the AMI that was used to launch an instance, even if the AMI is deprecated, deregistered, made private (no longer public or shared with your account), or not allowed.
If you specify instance IDs, the output includes information for only the specified instances. If you specify filters, the output includes information for only those instances that meet the filter criteria. If you do not specify instance IDs or filters, the output includes information for all instances, which can affect performance.
If you specify an instance ID that is not valid, an instance that doesn't exist, or an instance that you do not own, an error (InvalidInstanceID.NotFound
) is returned.
Recently terminated instances might appear in the returned results. This interval is usually less than one hour.
In the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected Availability Zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs that are in an unaffected Availability Zone, the call works normally.
The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.
This action is deprecated.
Describes the ClassicLink DNS support status of one or more VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance.
" }, + "DescribeVpcEndpointAssociations":{ + "name":"DescribeVpcEndpointAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointAssociationsRequest"}, + "output":{"shape":"DescribeVpcEndpointAssociationsResult"}, + "documentation":"Describes the VPC resources, VPC endpoint services, Amazon Lattice services, or service networks associated with the VPC endpoint.
" + }, "DescribeVpcEndpointConnectionNotifications":{ "name":"DescribeVpcEndpointConnectionNotifications", "http":{ @@ -3914,6 +3944,16 @@ "output":{"shape":"DisableAddressTransferResult"}, "documentation":"Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.
" }, + "DisableAllowedImagesSettings":{ + "name":"DisableAllowedImagesSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableAllowedImagesSettingsRequest"}, + "output":{"shape":"DisableAllowedImagesSettingsResult"}, + "documentation":"Disables Allowed AMIs for your account in the specified Amazon Web Services Region. When set to disabled
, the image criteria in your Allowed AMIs settings do not apply, and no restrictions are placed on AMI discoverability or usage. Users in your account can launch instances using any public AMI or AMI shared with your account.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of the criteria you set, the AMIs created by your account will always be discoverable and usable by users in your account.
For more information, see Control the discovery and use of AMIs in Amazon EC2 with Allowed AMIs in Amazon EC2 User Guide.
" + }, "DisableAwsNetworkPerformanceMetricSubscription":{ "name":"DisableAwsNetworkPerformanceMetricSubscription", "http":{ @@ -4241,6 +4281,16 @@ "output":{"shape":"EnableAddressTransferResult"}, "documentation":"Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.
" }, + "EnableAllowedImagesSettings":{ + "name":"EnableAllowedImagesSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableAllowedImagesSettingsRequest"}, + "output":{"shape":"EnableAllowedImagesSettingsResult"}, + "documentation":"Enables Allowed AMIs for your account in the specified Amazon Web Services Region. Two values are accepted:
enabled: The image criteria in your Allowed AMIs settings are applied. As a result, only AMIs matching these criteria are discoverable and can be used by your account to launch instances.
audit-mode: The image criteria in your Allowed AMIs settings are not applied. No restrictions are placed on AMI discoverability or usage. Users in your account can launch instances using any public AMI or AMI shared with your account.
The purpose of audit-mode is to indicate which AMIs will be affected when Allowed AMIs is enabled. In audit-mode, each AMI displays either \"ImageAllowed\": true or \"ImageAllowed\": false to indicate whether the AMI will be discoverable and available to users in the account when Allowed AMIs is enabled.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of the criteria you set, the AMIs created by your account will always be discoverable and usable by users in your account.
For more information, see Control the discovery and use of AMIs in Amazon EC2 with Allowed AMIs in Amazon EC2 User Guide.
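As a usage illustration only: a minimal AWS SDK for Java 2.x sketch of turning on audit mode. The enableAllowedImagesSettings method and the allowedImagesSettingsState accessors are assumptions derived from the operation and enum shapes added in this model, not part of a released client.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.ec2.Ec2Client;

public class EnableAllowedAmisAuditMode {
    public static void main(String[] args) {
        // Build an EC2 client for the Region whose Allowed AMIs setting should change.
        try (Ec2Client ec2 = Ec2Client.builder().region(Region.US_EAST_1).build()) {
            // audit-mode reports which AMIs would be blocked, without enforcing the criteria yet.
            var response = ec2.enableAllowedImagesSettings(
                    r -> r.allowedImagesSettingsState("audit-mode"));
            System.out.println("Allowed AMIs state: " + response.allowedImagesSettingsStateAsString());
        }
    }
}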
" + }, "EnableAwsNetworkPerformanceMetricSubscription":{ "name":"EnableAwsNetworkPerformanceMetricSubscription", "http":{ @@ -4449,6 +4499,26 @@ "output":{"shape":"ExportTransitGatewayRoutesResult"}, "documentation":"Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.
The routes are saved to the specified bucket in a JSON file. For more information, see Export route tables to Amazon S3 in the Amazon Web Services Transit Gateways Guide.
" }, + "ExportVerifiedAccessInstanceClientConfiguration":{ + "name":"ExportVerifiedAccessInstanceClientConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportVerifiedAccessInstanceClientConfigurationRequest"}, + "output":{"shape":"ExportVerifiedAccessInstanceClientConfigurationResult"}, + "documentation":"Exports the client configuration for a Verified Access instance.
" + }, + "GetAllowedImagesSettings":{ + "name":"GetAllowedImagesSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAllowedImagesSettingsRequest"}, + "output":{"shape":"GetAllowedImagesSettingsResult"}, + "documentation":"Gets the current state of the Allowed AMIs setting and the list of Allowed AMIs criteria at the account level in the specified Region.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of the criteria you set, the AMIs created by your account will always be discoverable and usable by users in your account.
For more information, see Control the discovery and use of AMIs in Amazon EC2 with Allowed AMIs in Amazon EC2 User Guide.
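A companion sketch for reading the current configuration; again, the getAllowedImagesSettings method and the state, imageCriteria, and imageProviders accessors are assumed from the shapes in this model rather than taken from a released client.

import software.amazon.awssdk.services.ec2.Ec2Client;

public class ShowAllowedAmisSettings {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // Fetch the account-level Allowed AMIs state and criteria for the client's Region.
            var settings = ec2.getAllowedImagesSettings(r -> {});
            System.out.println("State: " + settings.stateAsString());
            // Each criterion lists the AMI providers that are allowed.
            settings.imageCriteria().forEach(criterion ->
                    System.out.println("Allowed providers: " + criterion.imageProviders()));
        }
    }
}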
" + }, "GetAssociatedEnclaveCertificateIamRoles":{ "name":"GetAssociatedEnclaveCertificateIamRoles", "http":{ @@ -4519,6 +4589,16 @@ "output":{"shape":"GetConsoleScreenshotResult"}, "documentation":"Retrieve a JPG-format screenshot of a running instance to help with troubleshooting.
The returned content is Base64-encoded.
For more information, see Instance console output in the Amazon EC2 User Guide.
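The Base64 handling is the only non-obvious step; with the existing AWS SDK for Java 2.x EC2 client it looks roughly like this (the instance ID is a placeholder):

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Base64;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.GetConsoleScreenshotResponse;

public class SaveConsoleScreenshot {
    public static void main(String[] args) throws Exception {
        try (Ec2Client ec2 = Ec2Client.create()) {
            GetConsoleScreenshotResponse screenshot =
                    ec2.getConsoleScreenshot(r -> r.instanceId("i-0123456789abcdef0").wakeUp(true));
            // imageData() is a Base64-encoded JPG; decode it before writing to disk.
            byte[] jpg = Base64.getDecoder().decode(screenshot.imageData());
            Files.write(Paths.get("console-screenshot.jpg"), jpg);
        }
    }
}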
" }, + "GetDeclarativePoliciesReportSummary":{ + "name":"GetDeclarativePoliciesReportSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeclarativePoliciesReportSummaryRequest"}, + "output":{"shape":"GetDeclarativePoliciesReportSummaryResult"}, + "documentation":"Retrieves a summary of the account status report.
To view the full report, download it from the Amazon S3 bucket where it was saved. Reports are accessible only when they have the complete status. Reports with other statuses (running, cancelled, or error) are not available in the S3 bucket. For more information about downloading objects from an S3 bucket, see Downloading objects in the Amazon Simple Storage Service User Guide.
For more information, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.
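Downloading the finished report is plain Amazon S3 access; a minimal sketch with the AWS SDK for Java 2.x S3 client, using placeholder bucket and key values that follow the path structure documented for StartDeclarativePoliciesReport below:

import java.nio.file.Paths;
import software.amazon.awssdk.services.s3.S3Client;

public class DownloadDeclarativePoliciesReport {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // The key follows the documented pattern: <prefix>/ec2_<targetId>_<reportId>_<timestamp>.csv
            s3.getObject(r -> r.bucket("amzn-s3-demo-bucket")
                              .key("your-optional-s3-prefix/ec2_r-ab12_my-report-id_20241127T2135Z.csv"),
                    Paths.get("declarative-policies-report.csv"));
        }
    }
}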
" + }, "GetDefaultCreditSpecification":{ "name":"GetDefaultCreditSpecification", "http":{ @@ -4899,6 +4979,16 @@ "output":{"shape":"GetVerifiedAccessEndpointPolicyResult"}, "documentation":"Get the Verified Access policy associated with the endpoint.
" }, + "GetVerifiedAccessEndpointTargets":{ + "name":"GetVerifiedAccessEndpointTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetVerifiedAccessEndpointTargetsRequest"}, + "output":{"shape":"GetVerifiedAccessEndpointTargetsResult"}, + "documentation":"Gets the targets for the specified network CIDR endpoint for Verified Access.
" + }, "GetVerifiedAccessGroupPolicy":{ "name":"GetVerifiedAccessGroupPolicy", "http":{ @@ -5978,6 +6068,16 @@ "output":{"shape":"ReplaceIamInstanceProfileAssociationResult"}, "documentation":"Replaces an IAM instance profile for the specified running instance. You can use this action to change the IAM instance profile that's associated with an instance without having to disassociate the existing IAM instance profile first.
Use DescribeIamInstanceProfileAssociations to get the association ID.
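Both operations named here already exist in the AWS SDK for Java 2.x EC2 client; a rough sketch of the lookup-then-replace flow, with placeholder IDs and profile name:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.IamInstanceProfileAssociation;

public class SwapInstanceProfile {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // Look up the current association for the instance to obtain its association ID.
            IamInstanceProfileAssociation current = ec2.describeIamInstanceProfileAssociations(
                    r -> r.filters(f -> f.name("instance-id").values("i-0123456789abcdef0")))
                    .iamInstanceProfileAssociations().get(0);
            // Replace it in place; no need to disassociate the old profile first.
            ec2.replaceIamInstanceProfileAssociation(r -> r
                    .associationId(current.associationId())
                    .iamInstanceProfile(p -> p.name("my-new-instance-profile")));
        }
    }
}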
" }, + "ReplaceImageCriteriaInAllowedImagesSettings":{ + "name":"ReplaceImageCriteriaInAllowedImagesSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceImageCriteriaInAllowedImagesSettingsRequest"}, + "output":{"shape":"ReplaceImageCriteriaInAllowedImagesSettingsResult"}, + "documentation":"Sets or replaces the criteria for Allowed AMIs.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of the criteria you set, the AMIs created by your account will always be discoverable and usable by users in your account.
For more information, see Control the discovery and use of AMIs in Amazon EC2 with Allowed AMIs in Amazon EC2 User Guide.
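As an illustration only: a sketch of what setting the criteria could look like from the AWS SDK for Java 2.x. The replaceImageCriteriaInAllowedImagesSettings method and the imageCriteria and imageProviders builder members are assumptions based on the ImageCriterionRequest shape defined later in this model; the account ID is a placeholder.

import software.amazon.awssdk.services.ec2.Ec2Client;

public class RestrictAmisToTrustedProviders {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // Allow only Amazon-published AMIs plus AMIs shared by one trusted account.
            ec2.replaceImageCriteriaInAllowedImagesSettings(r -> r
                    .imageCriteria(c -> c.imageProviders("amazon", "123456789012")));
        }
    }
}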
" + }, "ReplaceNetworkAclAssociation":{ "name":"ReplaceNetworkAclAssociation", "http":{ @@ -6270,6 +6370,16 @@ "input":{"shape":"SendDiagnosticInterruptRequest"}, "documentation":"Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).
In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.
Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.
For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) in the Amazon EC2 User Guide.
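sendDiagnosticInterrupt is already available on the AWS SDK for Java 2.x EC2 client, so the call itself is a one-liner (placeholder instance ID):

import software.amazon.awssdk.services.ec2.Ec2Client;

public class TriggerDiagnosticInterrupt {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // Only send this after the instance's OS is configured to handle the NMI (crash dump, etc.).
            ec2.sendDiagnosticInterrupt(r -> r.instanceId("i-0123456789abcdef0"));
        }
    }
}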
" }, + "StartDeclarativePoliciesReport":{ + "name":"StartDeclarativePoliciesReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDeclarativePoliciesReportRequest"}, + "output":{"shape":"StartDeclarativePoliciesReportResult"}, + "documentation":"Generates an account status report. The report is generated asynchronously, and can take several hours to complete.
The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization).
The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values):
s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv
Prerequisites for generating a report
The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization.
An S3 bucket must be available before generating the report (you can create a new one or use an existing one), and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under .
Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide.
Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error.
For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.
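A sketch of the start-and-poll flow from the AWS SDK for Java 2.x. The startDeclarativePoliciesReport and describeDeclarativePoliciesReports methods, and the s3Bucket, targetId, reportId, and status members, are assumptions derived from the request, result, and DeclarativePoliciesReport shapes in this model; the bucket name and target ID are placeholders.

import software.amazon.awssdk.services.ec2.Ec2Client;

public class GenerateAccountStatusReport {
    public static void main(String[] args) throws InterruptedException {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // Kick off the asynchronous report for an entire organization root.
            String reportId = ec2.startDeclarativePoliciesReport(r -> r
                    .s3Bucket("amzn-s3-demo-bucket")
                    .targetId("r-ab12"))
                    .reportId();
            // Poll the report metadata until generation finishes; this can take hours.
            String status;
            do {
                Thread.sleep(60_000L);
                status = ec2.describeDeclarativePoliciesReports(r -> r.reportIds(reportId))
                        .reports().get(0).statusAsString();
            } while ("running".equals(status));
            System.out.println("Report " + reportId + " finished with status " + status);
        }
    }
}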
" + }, "StartInstances":{ "name":"StartInstances", "http":{ @@ -7557,6 +7667,17 @@ "type":"string", "enum":["used"] }, + "AllowedImagesSettingsDisabledState":{ + "type":"string", + "enum":["disabled"] + }, + "AllowedImagesSettingsEnabledState":{ + "type":"string", + "enum":[ + "enabled", + "audit-mode" + ] + }, "AllowedInstanceType":{ "type":"string", "max":30, @@ -9306,6 +9427,44 @@ }, "documentation":"Describes a value for a resource attribute that is a Boolean value.
" }, + "AttributeSummary":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "documentation":"The name of the attribute.
", + "locationName":"attributeName" + }, + "MostFrequentValue":{ + "shape":"String", + "documentation":"The configuration value that is most frequently observed for the attribute.
", + "locationName":"mostFrequentValue" + }, + "NumberOfMatchedAccounts":{ + "shape":"Integer", + "documentation":"The number of accounts with the same configuration value for the attribute that is most frequently observed.
", + "locationName":"numberOfMatchedAccounts" + }, + "NumberOfUnmatchedAccounts":{ + "shape":"Integer", + "documentation":"The number of accounts with a configuration value different from the most frequently observed value for the attribute.
", + "locationName":"numberOfUnmatchedAccounts" + }, + "RegionalSummaries":{ + "shape":"RegionalSummaryList", + "documentation":"The summary report for each Region for the attribute.
", + "locationName":"regionalSummarySet" + } + }, + "documentation":"A summary report for the attribute across all Regions.
" + }, + "AttributeSummaryList":{ + "type":"list", + "member":{ + "shape":"AttributeSummary", + "locationName":"item" + } + }, "AttributeValue":{ "type":"structure", "members":{ @@ -10244,6 +10403,30 @@ } } }, + "CancelDeclarativePoliciesReportRequest":{ + "type":"structure", + "required":["ReportId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The ID of the report.
" + } + } + }, + "CancelDeclarativePoliciesReportResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "documentation":"Is true
if the request succeeds, and an error otherwise.
The protocol.
" + }, + "SubnetIds":{ + "shape":"CreateVerifiedAccessEndpointSubnetIdList", + "documentation":"The IDs of the subnets.
", + "locationName":"SubnetId" + }, + "Cidr":{ + "shape":"String", + "documentation":"The CIDR.
" + }, + "PortRanges":{ + "shape":"CreateVerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"PortRange" + } + }, + "documentation":"Describes the CIDR options for a Verified Access endpoint.
" + }, "CreateVerifiedAccessEndpointEniOptions":{ "type":"structure", "members":{ @@ -16391,6 +16598,11 @@ "Port":{ "shape":"VerifiedAccessEndpointPortNumber", "documentation":"The IP port number.
" + }, + "PortRanges":{ + "shape":"CreateVerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"PortRange" } }, "documentation":"Describes the network interface options when creating an Amazon Web Services Verified Access endpoint using the network-interface
type.
The IDs of the subnets.
", "locationName":"SubnetId" + }, + "PortRanges":{ + "shape":"CreateVerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"PortRange" } }, "documentation":"Describes the load balancer options when creating an Amazon Web Services Verified Access endpoint using the load-balancer
type.
The start of the port range.
" + }, + "ToPort":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"The end of the port range.
" + } + }, + "documentation":"Describes the port range for a Verified Access endpoint.
" + }, + "CreateVerifiedAccessEndpointPortRangeList":{ + "type":"list", + "member":{ + "shape":"CreateVerifiedAccessEndpointPortRange", + "locationName":"item" + } + }, + "CreateVerifiedAccessEndpointRdsOptions":{ + "type":"structure", + "members":{ + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"The protocol.
" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"The port.
" + }, + "RdsDbInstanceArn":{ + "shape":"RdsDbInstanceArn", + "documentation":"The ARN of the RDS instance.
" + }, + "RdsDbClusterArn":{ + "shape":"RdsDbClusterArn", + "documentation":"The ARN of the DB cluster.
" + }, + "RdsDbProxyArn":{ + "shape":"RdsDbProxyArn", + "documentation":"The ARN of the RDS proxy.
" + }, + "RdsEndpoint":{ + "shape":"String", + "documentation":"The RDS endpoint.
" + }, + "SubnetIds":{ + "shape":"CreateVerifiedAccessEndpointSubnetIdList", + "documentation":"The IDs of the subnets.
", + "locationName":"SubnetId" + } + }, + "documentation":"Describes the RDS options for a Verified Access endpoint.
" + }, "CreateVerifiedAccessEndpointRequest":{ "type":"structure", "required":[ "VerifiedAccessGroupId", "EndpointType", - "AttachmentType", - "DomainCertificateArn", - "ApplicationDomain", - "EndpointDomainPrefix" + "AttachmentType" ], "members":{ "VerifiedAccessGroupId":{ @@ -16491,6 +16761,14 @@ "SseSpecification":{ "shape":"VerifiedAccessSseSpecificationRequest", "documentation":"The options for server side encryption.
" + }, + "RdsOptions":{ + "shape":"CreateVerifiedAccessEndpointRdsOptions", + "documentation":"The RDS details. This parameter is required if the endpoint type is rds
.
The CIDR options. This parameter is required if the endpoint type is cidr
.
Enable or disable support for Federal Information Processing Standards (FIPS) on the instance.
" + }, + "CidrEndpointsCustomSubDomain":{ + "shape":"String", + "documentation":"The custom subdomain.
" } } }, @@ -16594,6 +16876,44 @@ } } }, + "CreateVerifiedAccessNativeApplicationOidcOptions":{ + "type":"structure", + "members":{ + "PublicSigningKeyEndpoint":{ + "shape":"String", + "documentation":"The public signing key endpoint.
" + }, + "Issuer":{ + "shape":"String", + "documentation":"The OIDC issuer identifier of the IdP.
" + }, + "AuthorizationEndpoint":{ + "shape":"String", + "documentation":"The authorization endpoint of the IdP.
" + }, + "TokenEndpoint":{ + "shape":"String", + "documentation":"The token endpoint of the IdP.
" + }, + "UserInfoEndpoint":{ + "shape":"String", + "documentation":"The user info endpoint of the IdP.
" + }, + "ClientId":{ + "shape":"String", + "documentation":"The OAuth 2.0 client identifier.
" + }, + "ClientSecret":{ + "shape":"ClientSecretType", + "documentation":"The OAuth 2.0 client secret.
" + }, + "Scope":{ + "shape":"String", + "documentation":"The set of user claims to be requested from the IdP.
" + } + }, + "documentation":"Describes the OpenID Connect (OIDC) options.
" + }, "CreateVerifiedAccessTrustProviderDeviceOptions":{ "type":"structure", "members":{ @@ -16694,6 +17014,10 @@ "SseSpecification":{ "shape":"VerifiedAccessSseSpecificationRequest", "documentation":"The options for server side encryption.
" + }, + "NativeApplicationOidcOptions":{ + "shape":"CreateVerifiedAccessNativeApplicationOidcOptions", + "documentation":"The OpenID Connect (OIDC) options.
" } } }, @@ -16897,10 +17221,7 @@ }, "CreateVpcEndpointRequest":{ "type":"structure", - "required":[ - "VpcId", - "ServiceName" - ], + "required":["VpcId"], "members":{ "DryRun":{ "shape":"Boolean", @@ -16963,6 +17284,14 @@ "documentation":"The subnet configurations for the endpoint.
", "locationName":"SubnetConfiguration" }, + "ServiceNetworkArn":{ + "shape":"ServiceNetworkArn", + "documentation":"The Amazon Resource Name (ARN) of a service network that will be associated with the VPC endpoint of type service-network.
" + }, + "ResourceConfigurationArn":{ + "shape":"ResourceConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of a resource configuration that will be associated with the VPC endpoint of type resource.
" + }, "ServiceRegion":{ "shape":"String", "documentation":"The Region where the service is hosted. The default is the current Region.
" @@ -17460,6 +17789,65 @@ ] }, "DateTime":{"type":"timestamp"}, + "DeclarativePoliciesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "DeclarativePoliciesReport":{ + "type":"structure", + "members":{ + "ReportId":{ + "shape":"String", + "documentation":"The ID of the report.
", + "locationName":"reportId" + }, + "S3Bucket":{ + "shape":"String", + "documentation":"The name of the Amazon S3 bucket where the report is located.
", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"The prefix for your S3 object.
", + "locationName":"s3Prefix" + }, + "TargetId":{ + "shape":"String", + "documentation":"The root ID, organizational unit ID, or account ID.
Format:
For root: r-ab12
For OU: ou-ab12-cdef1234
For account: 123456789012
The time when the report generation started.
", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"MillisecondDateTime", + "documentation":"The time when the report generation ended.
", + "locationName":"endTime" + }, + "Status":{ + "shape":"ReportState", + "documentation":"The current status of the report.
", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "documentation":"Any tags assigned to the report.
", + "locationName":"tagSet" + } + }, + "documentation":"Describes the metadata of the account status report.
" + }, + "DeclarativePoliciesReportId":{"type":"string"}, + "DeclarativePoliciesReportList":{ + "type":"list", + "member":{ + "shape":"DeclarativePoliciesReport", + "locationName":"item" + } + }, "DedicatedHostFlag":{"type":"boolean"}, "DedicatedHostId":{"type":"string"}, "DedicatedHostIdList":{ @@ -20798,6 +21186,43 @@ }, "documentation":"Contains the output of DescribeCustomerGateways.
" }, + "DescribeDeclarativePoliciesReportsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.
" + }, + "MaxResults":{ + "shape":"DeclarativePoliciesMaxResults", + "documentation":"The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" + }, + "ReportIds":{ + "shape":"ValueStringList", + "documentation":"One or more report IDs.
", + "locationName":"ReportId" + } + } + }, + "DescribeDeclarativePoliciesReportsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"The token to include in another request to get the next page of items. This value is null
when there are no more items to return.
The report metadata.
", + "locationName":"reportSet" + } + } + }, "DescribeDhcpOptionsMaxResults":{ "type":"integer", "max":1000, @@ -21861,7 +22286,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"The filters.
architecture
- The image architecture (i386
| x86_64
| arm64
| x86_64_mac
| arm64_mac
).
block-device-mapping.delete-on-termination
- A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.
block-device-mapping.device-name
- The device name specified in the block device mapping (for example, /dev/sdh
or xvdh
).
block-device-mapping.snapshot-id
- The ID of the snapshot used for the Amazon EBS volume.
block-device-mapping.volume-size
- The volume size of the Amazon EBS volume, in GiB.
block-device-mapping.volume-type
- The volume type of the Amazon EBS volume (io1
| io2
| gp2
| gp3
| sc1
| st1
| standard
).
block-device-mapping.encrypted
- A Boolean that indicates whether the Amazon EBS volume is encrypted.
creation-date
- The time when the image was created, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z
. You can use a wildcard (*
), for example, 2021-09-29T*
, which matches an entire day.
description
- The description of the image (provided during image creation).
ena-support
- A Boolean that indicates whether enhanced networking with ENA is enabled.
hypervisor
- The hypervisor type (ovm
| xen
).
image-id
- The ID of the image.
image-type
- The image type (machine
| kernel
| ramdisk
).
is-public
- A Boolean that indicates whether the image is public.
kernel-id
- The kernel ID.
manifest-location
- The location of the image manifest.
name
- The name of the AMI (provided during image creation).
owner-alias
- The owner alias (amazon
| aws-backup-vault
| aws-marketplace
). The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set using the IAM console. We recommend that you use the Owner request parameter instead of this filter.
owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the Owner request parameter instead of this filter.
platform
- The platform. The only supported value is windows
.
product-code
- The product code.
product-code.type
- The type of the product code (marketplace
).
ramdisk-id
- The RAM disk ID.
root-device-name
- The device name of the root device volume (for example, /dev/sda1
).
root-device-type
- The type of the root device volume (ebs
| instance-store
).
source-instance-id
- The ID of the instance that the AMI was created from if the AMI was created using CreateImage. This filter is applicable only if the AMI was created using CreateImage.
state
- The state of the image (available
| pending
| failed
).
state-reason-code
- The reason code for the state change.
state-reason-message
- The message for the state change.
sriov-net-support
- A value of simple
indicates that enhanced networking with the Intel 82599 VF interface is enabled.
tag:<key>
- The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
virtualization-type
- The virtualization type (paravirtual
| hvm
).
The filters.
architecture
- The image architecture (i386
| x86_64
| arm64
| x86_64_mac
| arm64_mac
).
block-device-mapping.delete-on-termination
- A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.
block-device-mapping.device-name
- The device name specified in the block device mapping (for example, /dev/sdh
or xvdh
).
block-device-mapping.snapshot-id
- The ID of the snapshot used for the Amazon EBS volume.
block-device-mapping.volume-size
- The volume size of the Amazon EBS volume, in GiB.
block-device-mapping.volume-type
- The volume type of the Amazon EBS volume (io1
| io2
| gp2
| gp3
| sc1
| st1
| standard
).
block-device-mapping.encrypted
- A Boolean that indicates whether the Amazon EBS volume is encrypted.
creation-date
- The time when the image was created, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z
. You can use a wildcard (*
), for example, 2021-09-29T*
, which matches an entire day.
description
- The description of the image (provided during image creation).
ena-support
- A Boolean that indicates whether enhanced networking with ENA is enabled.
hypervisor
- The hypervisor type (ovm
| xen
).
image-allowed
- A Boolean that indicates whether the image meets the criteria specified for Allowed AMIs.
image-id
- The ID of the image.
image-type
- The image type (machine
| kernel
| ramdisk
).
is-public
- A Boolean that indicates whether the image is public.
kernel-id
- The kernel ID.
manifest-location
- The location of the image manifest.
name
- The name of the AMI (provided during image creation).
owner-alias
- The owner alias (amazon
| aws-backup-vault
| aws-marketplace
). The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set using the IAM console. We recommend that you use the Owner request parameter instead of this filter.
owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the Owner request parameter instead of this filter.
platform
- The platform. The only supported value is windows
.
product-code
- The product code.
product-code.type
- The type of the product code (marketplace
).
ramdisk-id
- The RAM disk ID.
root-device-name
- The device name of the root device volume (for example, /dev/sda1
).
root-device-type
- The type of the root device volume (ebs
| instance-store
).
source-image-id
- The ID of the source AMI from which the AMI was created.
source-image-region
- The Region of the source AMI.
source-instance-id
- The ID of the instance that the AMI was created from if the AMI was created using CreateImage. This filter is applicable only if the AMI was created using CreateImage.
state
- The state of the image (available
| pending
| failed
).
state-reason-code
- The reason code for the state change.
state-reason-message
- The message for the state change.
sriov-net-support
- A value of simple
indicates that enhanced networking with the Intel 82599 VF interface is enabled.
tag:<key>
- The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
virtualization-type
- The virtualization type (paravirtual
| hvm
).
The filters.
availability-zone
- The name of the Availability Zone (for example, us-west-2a
) or Local Zone (for example, us-west-2-lax-1b
) of the instance.
instance-id
- The ID of the instance.
instance-state-name
- The state of the instance (pending
| running
| shutting-down
| terminated
| stopping
| stopped
).
instance-type
- The type of instance (for example, t3.micro
).
launch-time
- The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2023-09-29T11:04:43.305Z
. You can use a wildcard (*
), for example, 2023-09-29T*
, which matches an entire day.
tag:<key>
- The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
zone-id
- The ID of the Availability Zone (for example, usw2-az2
) or Local Zone (for example, usw2-lax1-az1
) of the instance.
The filters.
availability-zone
- The name of the Availability Zone (for example, us-west-2a
) or Local Zone (for example, us-west-2-lax-1b
) of the instance.
instance-id
- The ID of the instance.
image-allowed
- A Boolean that indicates whether the image meets the criteria specified for Allowed AMIs.
instance-state-name
- The state of the instance (pending
| running
| shutting-down
| terminated
| stopping
| stopped
).
instance-type
- The type of instance (for example, t3.micro
).
launch-time
- The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2023-09-29T11:04:43.305Z
. You can use a wildcard (*
), for example, 2023-09-29T*
, which matches an entire day.
owner-alias
- The owner alias (amazon
| aws-marketplace
| aws-backup-vault
). The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set using the IAM console. We recommend that you use the Owner
request parameter instead of this filter.
owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the Owner
request parameter instead of this filter.
tag:<key>
- The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
zone-id
- The ID of the Availability Zone (for example, usw2-az2
) or Local Zone (for example, usw2-lax1-az1
) of the instance.
The filters.
description
- A description of the snapshot.
encrypted
- Indicates whether the snapshot is encrypted (true
| false
)
owner-alias
- The owner alias, from an Amazon-maintained list (amazon
). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.
owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.
progress
- The progress of the snapshot, as a percentage (for example, 80%).
snapshot-id
- The snapshot ID.
start-time
- The time stamp when the snapshot was initiated.
status
- The status of the snapshot (pending
| completed
| error
).
storage-tier
- The storage tier of the snapshot (archive
| standard
).
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
volume-id
- The ID of the volume the snapshot is for.
volume-size
- The size of the volume, in GiB.
The filters.
description
- A description of the snapshot.
encrypted
- Indicates whether the snapshot is encrypted (true
| false
)
owner-alias
- The owner alias, from an Amazon-maintained list (amazon
). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.
owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.
progress
- The progress of the snapshot, as a percentage (for example, 80%).
snapshot-id
- The snapshot ID.
start-time
- The time stamp when the snapshot was initiated.
status
- The status of the snapshot (pending
| completed
| error
).
storage-tier
- The storage tier of the snapshot (archive
| standard
).
transfer-type
- The type of operation used to create the snapshot (time-based
| standard
).
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
volume-id
- The ID of the volume the snapshot is for.
volume-size
- The size of the volume, in GiB.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The IDs of the VPC endpoints.
", + "locationName":"VpcEndpointId" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"The filters.
vpc-endpoint-id
- The ID of the VPC endpoint.
associated-resource-accessibility
- The association state. When the state is accessible
, it returns AVAILABLE
. When the state is inaccessible
, it returns PENDING
or FAILED
.
association-id
- The ID of the VPC endpoint association.
associated-resource-id
- The ID of the associated resource configuration.
service-network-arn
- The Amazon Resource Name (ARN) of the associated service network. Only VPC endpoints of type service network will be returned.
resource-configuration-group-arn
- The Amazon Resource Name (ARN) of the resource configuration of type GROUP.
service-network-resource-association-id
- The ID of the association.
The maximum page size.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"The pagination token.
" + } + } + }, + "DescribeVpcEndpointAssociationsResult":{ + "type":"structure", + "members":{ + "VpcEndpointAssociations":{ + "shape":"VpcEndpointAssociationSet", + "documentation":"Details of the endpoint associations.
", + "locationName":"vpcEndpointAssociationSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"The pagination token.
", + "locationName":"nextToken" + } + } + }, "DescribeVpcEndpointConnectionNotificationsRequest":{ "type":"structure", "members":{ @@ -27101,6 +27568,13 @@ "jumpcloud" ] }, + "DeviceTrustProviderTypeList":{ + "type":"list", + "member":{ + "shape":"DeviceTrustProviderType", + "locationName":"item" + } + }, "DeviceType":{ "type":"string", "enum":[ @@ -27224,6 +27698,25 @@ } } }, + "DisableAllowedImagesSettingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns disabled
if the request succeeds; otherwise, it returns an error.
The entity that manages the EBS volume.
", + "documentation":"The service provider that manages the EBS volume.
", "locationName":"operator" } }, @@ -29222,6 +29715,30 @@ } } }, + "EnableAllowedImagesSettingsRequest":{ + "type":"structure", + "required":["AllowedImagesSettingsState"], + "members":{ + "AllowedImagesSettingsState":{ + "shape":"AllowedImagesSettingsEnabledState", + "documentation":"Specify enabled
to apply the image criteria specified by the Allowed AMIs settings. Specify audit-mode
so that you can check which AMIs will be allowed or not allowed by the image criteria.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns enabled
or audit-mode
if the request succeeds; otherwise, it returns an error.
The ID of the Verified Access instance.
" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The version.
", + "locationName":"version" + }, + "VerifiedAccessInstanceId":{ + "shape":"String", + "documentation":"The ID of the Verified Access instance.
", + "locationName":"verifiedAccessInstanceId" + }, + "Region":{ + "shape":"String", + "documentation":"The Region.
", + "locationName":"region" + }, + "DeviceTrustProviders":{ + "shape":"DeviceTrustProviderTypeList", + "documentation":"The device trust providers.
", + "locationName":"deviceTrustProviderSet" + }, + "UserTrustProvider":{ + "shape":"VerifiedAccessInstanceUserTrustProviderClientConfiguration", + "documentation":"The user identity trust provider.
", + "locationName":"userTrustProvider" + }, + "OpenVpnConfigurations":{ + "shape":"VerifiedAccessInstanceOpenVpnClientConfigurationList", + "documentation":"The Open VPN configuration.
", + "locationName":"openVpnConfigurationSet" + } + } + }, "ExportVmTaskId":{"type":"string"}, "FailedCapacityReservationFleetCancellationResult":{ "type":"structure", @@ -31815,6 +32381,35 @@ "type":"string", "enum":["ipsec.1"] }, + "GetAllowedImagesSettingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The current state of the Allowed AMIs setting at the account level in the specified Amazon Web Services Region.
Possible values:
disabled
: All AMIs are allowed.
audit-mode
: All AMIs are allowed, but the ImageAllowed
field is set to true
if the AMI would be allowed with the current list of criteria if Allowed AMIs was enabled.
enabled
: Only AMIs matching the image criteria are discoverable and available for use.
The list of criteria for images that are discoverable and usable in the account in the specified Amazon Web Services Region.
", + "locationName":"imageCriterionSet" + }, + "ManagedBy":{ + "shape":"ManagedBy", + "documentation":"The entity that manages the Allowed AMIs settings. Possible values include:
account
- The Allowed AMIs settings is managed by the account.
declarative-policy
- The Allowed AMIs settings is managed by a declarative policy and can't be modified by the account.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The ID of the report.
" + } + } + }, + "GetDeclarativePoliciesReportSummaryResult":{ + "type":"structure", + "members":{ + "ReportId":{ + "shape":"String", + "documentation":"The ID of the report.
", + "locationName":"reportId" + }, + "S3Bucket":{ + "shape":"String", + "documentation":"The name of the Amazon S3 bucket where the report is located.
", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"The prefix for your S3 object.
", + "locationName":"s3Prefix" + }, + "TargetId":{ + "shape":"String", + "documentation":"The root ID, organizational unit ID, or account ID.
Format:
For root: r-ab12
For OU: ou-ab12-cdef1234
For account: 123456789012
The time when the report generation started.
", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"MillisecondDateTime", + "documentation":"The time when the report generation ended.
", + "locationName":"endTime" + }, + "NumberOfAccounts":{ + "shape":"Integer", + "documentation":"The total number of accounts associated with the specified targetId
.
The number of accounts where attributes could not be retrieved in any Region.
", + "locationName":"numberOfFailedAccounts" + }, + "AttributeSummaries":{ + "shape":"AttributeSummaryList", + "documentation":"The attributes described in the report.
", + "locationName":"attributeSummarySet" + } + } + }, "GetDefaultCreditSpecificationRequest":{ "type":"structure", "required":["InstanceFamily"], @@ -32316,6 +32975,11 @@ "shape":"String", "documentation":"The current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.
Possible values:
block-new-sharing
- Any attempt to publicly share your AMIs in the specified Region is blocked.
unblocked
- Your AMIs in the specified Region can be publicly shared.
The entity that manages the state for block public access for AMIs. Possible values include:
account
- The state is managed by the account.
declarative-policy
- The state is managed by a declarative policy and can't be modified by the account.
If true
, access to the EC2 serial console of all instances is enabled for your account. If false
, access to the EC2 serial console of all instances is disabled for your account.
The entity that manages access to the serial console. Possible values include:
account
- Access is managed by the account.
declarative-policy
- Access is managed by a declarative policy and can't be modified by the account.
The current state of block public access for snapshots. Possible values include:
block-all-sharing
- All public sharing of snapshots is blocked. Users in the account can't request new public sharing. Additionally, snapshots that were already publicly shared are treated as private and are not publicly available.
block-new-sharing
- Only new public sharing of snapshots is blocked. Users in the account can't request new public sharing. However, snapshots that were already publicly shared, remain publicly available.
unblocked
- Public sharing is not blocked. Users can publicly share snapshots.
The entity that manages the state for block public access for snapshots. Possible values include:
account
- The state is managed by the account.
declarative-policy
- The state is managed by a declarative policy and can't be modified by the account.
The ID of the network CIDR endpoint.
" + }, + "MaxResults":{ + "shape":"GetVerifiedAccessEndpointTargetsMaxResults", + "documentation":"The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The token for the next page of results.
" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The Verified Access targets.
", + "locationName":"verifiedAccessEndpointTargetSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The date and time, in ISO 8601 date-time format, when the AMI was last used to launch an EC2 instance. When the AMI is used to launch an instance, there is a 24-hour delay before that usage is reported.
lastLaunchedTime
data is available starting April 2017.
If true
, the AMI satisfies the criteria for Allowed AMIs and can be discovered and used in the account. If false
and Allowed AMIs is set to enabled
, the AMI can't be discovered or used in the account. If false
and Allowed AMIs is set to audit-mode
, the AMI can be discovered and used in the account.
For more information, see Control the discovery and use of AMIs in Amazon EC2 with Allowed AMIs in Amazon EC2 User Guide.
", + "locationName":"imageAllowed" + }, "SourceImageId":{ "shape":"String", "documentation":"The ID of the source AMI from which the AMI was created.
The ID only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The ID does not appear if the AMI was created using any other API. For some older AMIs, the ID might not be available. For more information, see Identify the source AMI used to create a new AMI in the Amazon EC2 User Guide.
", @@ -34789,6 +35510,42 @@ "type":"string", "enum":["block-new-sharing"] }, + "ImageCriterion":{ + "type":"structure", + "members":{ + "ImageProviders":{ + "shape":"ImageProviderList", + "documentation":"A list of AMI providers whose AMIs are discoverable and useable in the account. Up to a total of 200 values can be specified.
Possible values:
amazon
: Allow AMIs created by Amazon Web Services.
aws-marketplace
: Allow AMIs created by verified providers in the Amazon Web Services Marketplace.
aws-backup-vault
: Allow AMIs created by Amazon Web Services Backup.
12-digit account ID: Allow AMIs created by this account. One or more account IDs can be specified.
none
: Allow AMIs created by your own account only.
The list of criteria that are evaluated to determine whch AMIs are discoverable and usable in the account in the specified Amazon Web Services Region. Currently, the only criteria that can be specified are AMI providers.
Up to 10 imageCriteria
objects can be specified, and up to a total of 200 values for all imageProviders
. For more information, see JSON configuration for the Allowed AMIs criteria in the Amazon EC2 User Guide.
A list of image providers whose AMIs are discoverable and useable in the account. Up to a total of 200 values can be specified.
Possible values:
amazon
: Allow AMIs created by Amazon Web Services.
aws-marketplace
: Allow AMIs created by verified providers in the Amazon Web Services Marketplace.
aws-backup-vault
: Allow AMIs created by Amazon Web Services Backup.
12-digit account ID: Allow AMIs created by this account. One or more account IDs can be specified.
none
: Allow AMIs created by your own account only. When none
is specified, no other values can be specified.
The list of criteria that are evaluated to determine whch AMIs are discoverable and usable in the account in the specified Amazon Web Services Region. Currently, the only criteria that can be specified are AMI providers.
Up to 10 imageCriteria
objects can be specified, and up to a total of 200 values for all imageProviders
. For more information, see JSON configuration for the Allowed AMIs criteria in the Amazon EC2 User Guide.
The deprecation date and time of the AMI, in UTC, in the following format: YYYY-MM-DDTHH:MM:SSZ.
", "locationName":"deprecationTime" }, + "ImageAllowed":{ + "shape":"Boolean", + "documentation":"If true
, the AMI satisfies the criteria for Allowed AMIs and can be discovered and used in the account. If false
, the AMI can't be discovered or used in the account.
For more information, see Control the discovery and use of AMIs in Amazon EC2 with Allowed AMIs in Amazon EC2 User Guide.
", + "locationName":"imageAllowed" + }, "IsPublic":{ "shape":"Boolean", "documentation":"Indicates whether the AMI has public launch permissions. A value of true
means this AMI has public launch permissions, while false
means it has only implicit (AMI owner) or explicit (shared with your account) launch permissions.
Information about the AMI.
" }, + "ImageProvider":{"type":"string"}, + "ImageProviderList":{ + "type":"list", + "member":{ + "shape":"ImageProvider", + "locationName":"item" + } + }, + "ImageProviderRequest":{"type":"string"}, + "ImageProviderRequestList":{ + "type":"list", + "member":{ + "shape":"ImageProviderRequest", + "locationName":"item" + } + }, "ImageRecycleBinInfo":{ "type":"structure", "members":{ @@ -35963,7 +36741,7 @@ }, "Operator":{ "shape":"OperatorResponse", - "documentation":"The entity that manages the instance.
", + "documentation":"The service provider that manages the instance.
", "locationName":"operator" }, "InstanceId":{ @@ -36902,6 +37680,16 @@ "shape":"InstanceMetadataTagsState", "documentation":"Indicates whether access to instance tags from the instance metadata is enabled or disabled. For more information, see Work with instance tags using the instance metadata in the Amazon EC2 User Guide.
", "locationName":"instanceMetadataTags" + }, + "ManagedBy":{ + "shape":"ManagedBy", + "documentation":"The entity that manages the IMDS default settings. Possible values include:
account
- The IMDS default settings are managed by the account.
declarative-policy
- The IMDS default settings are managed by a declarative policy and can't be modified by the account.
The customized exception message that is specified in the declarative policy.
", + "locationName":"managedExceptionMessage" } }, "documentation":"The default instance metadata service (IMDS) settings that were set at the account level in the specified Amazon Web Services Region.
" @@ -37119,7 +37907,7 @@ }, "Operator":{ "shape":"OperatorResponse", - "documentation":"The entity that manages the network interface.
", + "documentation":"The service provider that manages the network interface.
", "locationName":"operator" } }, @@ -37720,7 +38508,7 @@ }, "Operator":{ "shape":"OperatorResponse", - "documentation":"The entity that manages the instance.
", + "documentation":"The service provider that manages the instance.
", "locationName":"operator" }, "Events":{ @@ -38809,7 +39597,25 @@ "x8g.24xlarge", "x8g.48xlarge", "x8g.metal-24xl", - "x8g.metal-48xl" + "x8g.metal-48xl", + "i7ie.large", + "i7ie.xlarge", + "i7ie.2xlarge", + "i7ie.3xlarge", + "i7ie.6xlarge", + "i7ie.12xlarge", + "i7ie.18xlarge", + "i7ie.24xlarge", + "i7ie.48xlarge", + "i8g.large", + "i8g.xlarge", + "i8g.2xlarge", + "i8g.4xlarge", + "i8g.8xlarge", + "i8g.12xlarge", + "i8g.16xlarge", + "i8g.24xlarge", + "i8g.metal-24xl" ] }, "InstanceTypeHypervisor":{ @@ -43471,6 +44277,13 @@ }, "documentation":"Details for Site-to-Site VPN tunnel endpoint maintenance events.
" }, + "ManagedBy":{ + "type":"string", + "enum":[ + "account", + "declarative-policy" + ] + }, "ManagedPrefixList":{ "type":"structure", "members":{ @@ -45725,6 +46538,17 @@ } } }, + "ModifyVerifiedAccessEndpointCidrOptions":{ + "type":"structure", + "members":{ + "PortRanges":{ + "shape":"ModifyVerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"PortRange" + } + }, + "documentation":"The CIDR options for a Verified Access endpoint.
" + }, "ModifyVerifiedAccessEndpointEniOptions":{ "type":"structure", "members":{ @@ -45735,6 +46559,11 @@ "Port":{ "shape":"VerifiedAccessEndpointPortNumber", "documentation":"The IP port number.
" + }, + "PortRanges":{ + "shape":"ModifyVerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"PortRange" } }, "documentation":"Describes the options when modifying a Verified Access endpoint with the network-interface
type.
The IP port number.
" + }, + "PortRanges":{ + "shape":"ModifyVerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"PortRange" } }, "documentation":"Describes a load balancer when creating an Amazon Web Services Verified Access endpoint using the load-balancer
type.
The start of the port range.
" + }, + "ToPort":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"The end of the port range.
" + } + }, + "documentation":"Describes the port range for a Verified Access endpoint.
" + }, + "ModifyVerifiedAccessEndpointPortRangeList":{ + "type":"list", + "member":{ + "shape":"ModifyVerifiedAccessEndpointPortRange", + "locationName":"item" + } + }, + "ModifyVerifiedAccessEndpointRdsOptions":{ + "type":"structure", + "members":{ + "SubnetIds":{ + "shape":"ModifyVerifiedAccessEndpointSubnetIdList", + "documentation":"The IDs of the subnets.
", + "locationName":"SubnetId" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"The port.
" + }, + "RdsEndpoint":{ + "shape":"String", + "documentation":"The RDS endpoint.
" + } + }, + "documentation":"The RDS options for a Verified Access endpoint.
" + }, "ModifyVerifiedAccessEndpointRequest":{ "type":"structure", "required":["VerifiedAccessEndpointId"], @@ -45841,6 +46715,14 @@ "DryRun":{ "shape":"Boolean", "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The RDS options.
" + }, + "CidrOptions":{ + "shape":"ModifyVerifiedAccessEndpointCidrOptions", + "documentation":"The CIDR options.
" } } }, @@ -46005,6 +46887,10 @@ "shape":"String", "documentation":"A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.
", "idempotencyToken":true + }, + "CidrEndpointsCustomSubDomain":{ + "shape":"String", + "documentation":"The custom subdomain.
" } } }, @@ -46018,12 +46904,50 @@ } } }, + "ModifyVerifiedAccessNativeApplicationOidcOptions":{ + "type":"structure", + "members":{ + "PublicSigningKeyEndpoint":{ + "shape":"String", + "documentation":"The public signing key endpoint.
" + }, + "Issuer":{ + "shape":"String", + "documentation":"The OIDC issuer identifier of the IdP.
" + }, + "AuthorizationEndpoint":{ + "shape":"String", + "documentation":"The authorization endpoint of the IdP.
" + }, + "TokenEndpoint":{ + "shape":"String", + "documentation":"The token endpoint of the IdP.
" + }, + "UserInfoEndpoint":{ + "shape":"String", + "documentation":"The user info endpoint of the IdP.
" + }, + "ClientId":{ + "shape":"String", + "documentation":"The OAuth 2.0 client identifier.
" + }, + "ClientSecret":{ + "shape":"ClientSecretType", + "documentation":"The OAuth 2.0 client secret.
" + }, + "Scope":{ + "shape":"String", + "documentation":"The set of user claims to be requested from the IdP.
" + } + }, + "documentation":"Describes the OpenID Connect (OIDC) options.
" + }, "ModifyVerifiedAccessTrustProviderDeviceOptions":{ "type":"structure", "members":{ "PublicSigningKeyUrl":{ "shape":"String", - "documentation":"The URL Amazon Web Services Verified Access will use to verify the authenticity of the device tokens.
" + "documentation":"The URL Amazon Web Services Verified Access will use to verify the authenticity of the device tokens.
" } }, "documentation":"Modifies the configuration of the specified device-based Amazon Web Services Verified Access trust provider.
" @@ -46094,6 +47018,10 @@ "SseSpecification":{ "shape":"VerifiedAccessSseSpecificationRequest", "documentation":"The options for server side encryption.
" + }, + "NativeApplicationOidcOptions":{ + "shape":"ModifyVerifiedAccessNativeApplicationOidcOptions", + "documentation":"The OpenID Connect (OIDC) options.
" } } }, @@ -47168,6 +48096,47 @@ "deleted" ] }, + "NativeApplicationOidcOptions":{ + "type":"structure", + "members":{ + "PublicSigningKeyEndpoint":{ + "shape":"String", + "documentation":"The public signing key endpoint.
", + "locationName":"publicSigningKeyEndpoint" + }, + "Issuer":{ + "shape":"String", + "documentation":"The OIDC issuer identifier of the IdP.
", + "locationName":"issuer" + }, + "AuthorizationEndpoint":{ + "shape":"String", + "documentation":"The authorization endpoint of the IdP.
", + "locationName":"authorizationEndpoint" + }, + "TokenEndpoint":{ + "shape":"String", + "documentation":"The token endpoint of the IdP.
", + "locationName":"tokenEndpoint" + }, + "UserInfoEndpoint":{ + "shape":"String", + "documentation":"The user info endpoint of the IdP.
", + "locationName":"userInfoEndpoint" + }, + "ClientId":{ + "shape":"String", + "documentation":"The OAuth 2.0 client identifier.
", + "locationName":"clientId" + }, + "Scope":{ + "shape":"String", + "documentation":"The set of user claims to be requested from the IdP.
", + "locationName":"scope" + } + }, + "documentation":"Describes the OpenID Connect (OIDC) options.
" + }, "NetmaskLength":{"type":"integer"}, "NetworkAcl":{ "type":"structure", @@ -47928,7 +48897,7 @@ }, "Operator":{ "shape":"OperatorResponse", - "documentation":"The entity that manages the network interface.
", + "documentation":"The service provider that manages the network interface.
", "locationName":"operator" } }, @@ -48580,26 +49549,26 @@ "members":{ "Principal":{ "shape":"String", - "documentation":"The entity that manages the resource.
" + "documentation":"The service provider that manages the resource.
" } }, - "documentation":"The entity that manages the resource.
" + "documentation":"The service provider that manages the resource.
" }, "OperatorResponse":{ "type":"structure", "members":{ "Managed":{ "shape":"Boolean", - "documentation":"If true
, the resource is managed by an entity.
If true
, the resource is managed by a service provider.
If managed
is true
, then the principal is returned. The principal is the entity that manages the resource.
If managed
is true
, then the principal is returned. The principal is the service provider that manages the resource.
Describes whether the resource is managed by an entity and, if so, describes the entity that manages it.
" + "documentation":"Describes whether the resource is managed by an service provider and, if so, describes the service provider that manages it.
" }, "OrganizationArnStringList":{ "type":"list", @@ -50673,6 +51642,9 @@ ] }, "RamdiskId":{"type":"string"}, + "RdsDbClusterArn":{"type":"string"}, + "RdsDbInstanceArn":{"type":"string"}, + "RdsDbProxyArn":{"type":"string"}, "ReasonCodesList":{ "type":"list", "member":{ @@ -50795,6 +51767,34 @@ "max":10, "min":0 }, + "RegionalSummary":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"String", + "documentation":"The Amazon Web Services Region.
", + "locationName":"regionName" + }, + "NumberOfMatchedAccounts":{ + "shape":"Integer", + "documentation":"The number of accounts in the Region with the same configuration value for the attribute that is most frequently observed.
", + "locationName":"numberOfMatchedAccounts" + }, + "NumberOfUnmatchedAccounts":{ + "shape":"Integer", + "documentation":"The number of accounts in the Region with a configuration value different from the most frequently observed value for the attribute.
", + "locationName":"numberOfUnmatchedAccounts" + } + }, + "documentation":"A summary report for the attribute for a Region.
" + }, + "RegionalSummaryList":{ + "type":"list", + "member":{ + "shape":"RegionalSummary", + "locationName":"item" + } + }, "RegisterImageRequest":{ "type":"structure", "required":["Name"], @@ -51328,6 +52328,30 @@ } } }, + "ReplaceImageCriteriaInAllowedImagesSettingsRequest":{ + "type":"structure", + "members":{ + "ImageCriteria":{ + "shape":"ImageCriterionRequestList", + "documentation":"The list of criteria that are evaluated to determine whether AMIs are discoverable and usable in the account in the specified Amazon Web Services Region.
", + "locationName":"ImageCriterion" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns true
if the request succeeds; otherwise, it returns an error.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The name of the S3 bucket where the report will be saved.
" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"The prefix for your S3 object.
" + }, + "TargetId":{ + "shape":"String", + "documentation":"The root ID, organizational unit ID, or account ID.
Format:
For root: r-ab12
For OU: ou-ab12-cdef1234
For account: 123456789012
The tags to apply.
", + "locationName":"TagSpecification" + } + } + }, + "StartDeclarativePoliciesReportResult":{ + "type":"structure", + "members":{ + "ReportId":{ + "shape":"String", + "documentation":"The ID of the report.
", + "locationName":"reportId" + } + } + }, "StartInstancesRequest":{ "type":"structure", "required":["InstanceIds"], @@ -57243,7 +58320,8 @@ "Deleted", "Rejected", "Failed", - "Expired" + "Expired", + "Partial" ] }, "StateReason":{ @@ -57677,6 +58755,29 @@ "locationName":"SubnetId" } }, + "SubnetIpPrefixes":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"String", + "documentation":"ID of the subnet.
", + "locationName":"subnetId" + }, + "IpPrefixes":{ + "shape":"ValueStringList", + "documentation":"Array of SubnetIpPrefixes objects.
", + "locationName":"ipPrefixSet" + } + }, + "documentation":"Prefixes of the subnet IP.
" + }, + "SubnetIpPrefixesList":{ + "type":"list", + "member":{ + "shape":"SubnetIpPrefixes", + "locationName":"item" + } + }, "SubnetIpv6CidrBlockAssociation":{ "type":"structure", "members":{ @@ -61297,6 +62398,16 @@ "shape":"VerifiedAccessSseSpecificationResponse", "documentation":"The options in use for server side encryption.
", "locationName":"sseSpecification" + }, + "RdsOptions":{ + "shape":"VerifiedAccessEndpointRdsOptions", + "documentation":"The options for an RDS endpoint.
", + "locationName":"rdsOptions" + }, + "CidrOptions":{ + "shape":"VerifiedAccessEndpointCidrOptions", + "documentation":"The options for a CIDR endpoint.
", + "locationName":"cidrOptions" } }, "documentation":"An Amazon Web Services Verified Access endpoint specifies the application that Amazon Web Services Verified Access provides access to. It must be attached to an Amazon Web Services Verified Access group. An Amazon Web Services Verified Access endpoint must also have an attached access policy before you attached it to a group.
" @@ -61305,6 +62416,32 @@ "type":"string", "enum":["vpc"] }, + "VerifiedAccessEndpointCidrOptions":{ + "type":"structure", + "members":{ + "Cidr":{ + "shape":"String", + "documentation":"The CIDR.
", + "locationName":"cidr" + }, + "PortRanges":{ + "shape":"VerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"portRangeSet" + }, + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"The protocol.
", + "locationName":"protocol" + }, + "SubnetIds":{ + "shape":"VerifiedAccessEndpointSubnetIdList", + "documentation":"The IDs of the subnets.
", + "locationName":"subnetIdSet" + } + }, + "documentation":"Describes the CIDR options for a Verified Access endpoint.
" + }, "VerifiedAccessEndpointEniOptions":{ "type":"structure", "members":{ @@ -61322,6 +62459,11 @@ "shape":"VerifiedAccessEndpointPortNumber", "documentation":"The IP port number.
", "locationName":"port" + }, + "PortRanges":{ + "shape":"VerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"portRangeSet" } }, "documentation":"Options for a network-interface type endpoint.
" @@ -61363,6 +62505,11 @@ "shape":"VerifiedAccessEndpointSubnetIdList", "documentation":"The IDs of the subnets.
", "locationName":"subnetIdSet" + }, + "PortRanges":{ + "shape":"VerifiedAccessEndpointPortRangeList", + "documentation":"The port ranges.
", + "locationName":"portRangeSet" } }, "documentation":"Describes a load balancer when creating an Amazon Web Services Verified Access endpoint using the load-balancer
type.
The start of the port range.
", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"The end of the port range.
", + "locationName":"toPort" + } + }, + "documentation":"Describes a port range.
" + }, + "VerifiedAccessEndpointPortRangeList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessEndpointPortRange", + "locationName":"item" + } + }, "VerifiedAccessEndpointProtocol":{ "type":"string", "enum":[ "http", - "https" + "https", + "tcp" ] }, + "VerifiedAccessEndpointRdsOptions":{ + "type":"structure", + "members":{ + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"The protocol.
", + "locationName":"protocol" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"The port.
", + "locationName":"port" + }, + "RdsDbInstanceArn":{ + "shape":"String", + "documentation":"The ARN of the RDS instance.
", + "locationName":"rdsDbInstanceArn" + }, + "RdsDbClusterArn":{ + "shape":"String", + "documentation":"The ARN of the DB cluster.
", + "locationName":"rdsDbClusterArn" + }, + "RdsDbProxyArn":{ + "shape":"String", + "documentation":"The ARN of the RDS proxy.
", + "locationName":"rdsDbProxyArn" + }, + "RdsEndpoint":{ + "shape":"String", + "documentation":"The RDS endpoint.
", + "locationName":"rdsEndpoint" + }, + "SubnetIds":{ + "shape":"VerifiedAccessEndpointSubnetIdList", + "documentation":"The IDs of the subnets.
", + "locationName":"subnetIdSet" + } + }, + "documentation":"Describes the RDS options for a Verified Access endpoint.
" + }, "VerifiedAccessEndpointStatus":{ "type":"structure", "members":{ @@ -61412,11 +62624,41 @@ "locationName":"item" } }, + "VerifiedAccessEndpointTarget":{ + "type":"structure", + "members":{ + "VerifiedAccessEndpointId":{ + "shape":"VerifiedAccessEndpointId", + "documentation":"The ID of the Verified Access endpoint.
", + "locationName":"verifiedAccessEndpointId" + }, + "VerifiedAccessEndpointTargetIpAddress":{ + "shape":"String", + "documentation":"The IP address of the target.
", + "locationName":"verifiedAccessEndpointTargetIpAddress" + }, + "VerifiedAccessEndpointTargetDns":{ + "shape":"String", + "documentation":"The DNS name of the target.
", + "locationName":"verifiedAccessEndpointTargetDns" + } + }, + "documentation":"Describes the targets for the specified Verified Access endpoint.
" + }, + "VerifiedAccessEndpointTargetList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessEndpointTarget", + "locationName":"item" + } + }, "VerifiedAccessEndpointType":{ "type":"string", "enum":[ "load-balancer", - "network-interface" + "network-interface", + "rds", + "cidr" ] }, "VerifiedAccessGroup":{ @@ -61527,10 +62769,31 @@ "shape":"Boolean", "documentation":"Indicates whether support for Federal Information Processing Standards (FIPS) is enabled on the instance.
", "locationName":"fipsEnabled" + }, + "CidrEndpointsCustomSubDomain":{ + "shape":"VerifiedAccessInstanceCustomSubDomain", + "documentation":"The custom subdomain.
", + "locationName":"cidrEndpointsCustomSubDomain" } }, "documentation":"Describes a Verified Access instance.
" }, + "VerifiedAccessInstanceCustomSubDomain":{ + "type":"structure", + "members":{ + "SubDomain":{ + "shape":"String", + "documentation":"The subdomain.
", + "locationName":"subDomain" + }, + "Nameservers":{ + "shape":"ValueStringList", + "documentation":"The name servers.
", + "locationName":"nameserverSet" + } + }, + "documentation":"Describes a custom subdomain for a network CIDR endpoint for Verified Access.
" + }, "VerifiedAccessInstanceId":{"type":"string"}, "VerifiedAccessInstanceIdList":{ "type":"list", @@ -61569,6 +62832,103 @@ "locationName":"item" } }, + "VerifiedAccessInstanceOpenVpnClientConfiguration":{ + "type":"structure", + "members":{ + "Config":{ + "shape":"String", + "documentation":"The base64-encoded Open VPN client configuration.
", + "locationName":"config" + }, + "Routes":{ + "shape":"VerifiedAccessInstanceOpenVpnClientConfigurationRouteList", + "documentation":"The routes.
", + "locationName":"routeSet" + } + }, + "documentation":"Describes a set of routes.
" + }, + "VerifiedAccessInstanceOpenVpnClientConfigurationList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessInstanceOpenVpnClientConfiguration", + "locationName":"item" + } + }, + "VerifiedAccessInstanceOpenVpnClientConfigurationRoute":{ + "type":"structure", + "members":{ + "Cidr":{ + "shape":"String", + "documentation":"The CIDR block.
", + "locationName":"cidr" + } + }, + "documentation":"Describes a route.
" + }, + "VerifiedAccessInstanceOpenVpnClientConfigurationRouteList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessInstanceOpenVpnClientConfigurationRoute", + "locationName":"item" + } + }, + "VerifiedAccessInstanceUserTrustProviderClientConfiguration":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"UserTrustProviderType", + "documentation":"The trust provider type.
", + "locationName":"type" + }, + "Scopes":{ + "shape":"String", + "documentation":"The set of user claims to be requested from the IdP.
", + "locationName":"scopes" + }, + "Issuer":{ + "shape":"String", + "documentation":"The OIDC issuer identifier of the IdP.
", + "locationName":"issuer" + }, + "AuthorizationEndpoint":{ + "shape":"String", + "documentation":"The authorization endpoint of the IdP.
", + "locationName":"authorizationEndpoint" + }, + "PublicSigningKeyEndpoint":{ + "shape":"String", + "documentation":"The public signing key endpoint.
", + "locationName":"publicSigningKeyEndpoint" + }, + "TokenEndpoint":{ + "shape":"String", + "documentation":"The token endpoint of the IdP.
", + "locationName":"tokenEndpoint" + }, + "UserInfoEndpoint":{ + "shape":"String", + "documentation":"The user info endpoint of the IdP.
", + "locationName":"userInfoEndpoint" + }, + "ClientId":{ + "shape":"String", + "documentation":"The OAuth 2.0 client identifier.
", + "locationName":"clientId" + }, + "ClientSecret":{ + "shape":"ClientSecretType", + "documentation":"The OAuth 2.0 client secret.
", + "locationName":"clientSecret" + }, + "PkceEnabled":{ + "shape":"Boolean", + "documentation":"Indicates whether Proof of Key Code Exchange (PKCE) is enabled.
", + "locationName":"pkceEnabled" + } + }, + "documentation":"Describes the trust provider.
" + }, "VerifiedAccessLogCloudWatchLogsDestination":{ "type":"structure", "members":{ @@ -61867,6 +63227,11 @@ "shape":"VerifiedAccessSseSpecificationResponse", "documentation":"The options in use for server side encryption.
", "locationName":"sseSpecification" + }, + "NativeApplicationOidcOptions":{ + "shape":"NativeApplicationOidcOptions", + "documentation":"The OpenID Connect (OIDC) options.
", + "locationName":"nativeApplicationOidcOptions" } }, "documentation":"Describes a Verified Access trust provider.
" @@ -62047,7 +63412,7 @@ }, "Operator":{ "shape":"OperatorResponse", - "documentation":"The entity that manages the volume.
", + "documentation":"The service provider that manages the volume.
", "locationName":"operator" }, "VolumeId":{ @@ -62695,6 +64060,13 @@ "disable-complete" ] }, + "VpcBlockPublicAccessExclusionsAllowed":{ + "type":"string", + "enum":[ + "allowed", + "not-allowed" + ] + }, "VpcBlockPublicAccessOptions":{ "type":"structure", "members":{ @@ -62727,6 +64099,16 @@ "shape":"MillisecondDateTime", "documentation":"The last time the VPC BPA mode was updated.
", "locationName":"lastUpdateTimestamp" + }, + "ManagedBy":{ + "shape":"ManagedBy", + "documentation":"The entity that manages the state of VPC BPA. Possible values include:
account
- The state is managed by the account.
declarative-policy
- The state is managed by a declarative policy and can't be modified by the account.
Determines if exclusions are allowed. If you have enabled VPC BPA at the Organization level, exclusions may be not-allowed
. Otherwise, they are allowed
.
VPC Block Public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
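A minimal sketch of inspecting the new fields, assuming the generated getters on VpcBlockPublicAccessOptions are named managedBy and exclusionsAllowed (the exclusions member name is not visible in this hunk, so it is an assumption); DescribeVpcBlockPublicAccessOptions itself is an existing EC2 operation.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.VpcBlockPublicAccessOptions;

public class CheckVpcBpaState {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            VpcBlockPublicAccessOptions options =
                    ec2.describeVpcBlockPublicAccessOptions().vpcBlockPublicAccessOptions();

            // Both getters below correspond to members added in this change (assumed names).
            System.out.println("BPA state managed by: " + options.managedBy());
            System.out.println("Exclusions allowed:   " + options.exclusionsAllowed());
        }
    }
}
```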
" @@ -62928,6 +64310,31 @@ "documentation":"The last error that occurred for endpoint.
", "locationName":"lastError" }, + "Ipv4Prefixes":{ + "shape":"SubnetIpPrefixesList", + "documentation":"Array of IPv4 prefixes.
", + "locationName":"ipv4PrefixSet" + }, + "Ipv6Prefixes":{ + "shape":"SubnetIpPrefixesList", + "documentation":"Array of IPv6 prefixes.
", + "locationName":"ipv6PrefixSet" + }, + "FailureReason":{ + "shape":"String", + "documentation":"Reason for the failure.
", + "locationName":"failureReason" + }, + "ServiceNetworkArn":{ + "shape":"ServiceNetworkArn", + "documentation":"The Amazon Resource Name (ARN) of the service network.
", + "locationName":"serviceNetworkArn" + }, + "ResourceConfigurationArn":{ + "shape":"ResourceConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the resource configuration.
", + "locationName":"resourceConfigurationArn" + }, "ServiceRegion":{ "shape":"String", "documentation":"The Region where the service is hosted.
", @@ -62936,6 +64343,79 @@ }, "documentation":"Describes a VPC endpoint.
" }, + "VpcEndpointAssociation":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"String", + "documentation":"The ID of the VPC endpoint association.
", + "locationName":"id" + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"The ID of the VPC endpoint.
", + "locationName":"vpcEndpointId" + }, + "ServiceNetworkArn":{ + "shape":"ServiceNetworkArn", + "documentation":"The Amazon Resource Name (ARN) of the service network.
", + "locationName":"serviceNetworkArn" + }, + "ServiceNetworkName":{ + "shape":"String", + "documentation":"The name of the service network.
", + "locationName":"serviceNetworkName" + }, + "AssociatedResourceAccessibility":{ + "shape":"String", + "documentation":"The connectivity status of the resources associated to a VPC endpoint. The resource is accessible if the associated resource configuration is AVAILABLE
; otherwise, the resource is inaccessible.
A message related to why a VPC endpoint association failed.
", + "locationName":"failureReason" + }, + "FailureCode":{ + "shape":"String", + "documentation":"An error code related to why a VPC endpoint association failed.
", + "locationName":"failureCode" + }, + "DnsEntry":{ + "shape":"DnsEntry", + "documentation":"The DNS entry of the VPC endpoint association.
", + "locationName":"dnsEntry" + }, + "PrivateDnsEntry":{ + "shape":"DnsEntry", + "documentation":"The private DNS entry of the VPC endpoint association.
", + "locationName":"privateDnsEntry" + }, + "AssociatedResourceArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the associated resource.
", + "locationName":"associatedResourceArn" + }, + "ResourceConfigurationGroupArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the resource configuration group.
", + "locationName":"resourceConfigurationGroupArn" + }, + "Tags":{ + "shape":"TagList", + "documentation":"The tags to apply to the VPC endpoint association.
", + "locationName":"tagSet" + } + }, + "documentation":"Describes the VPC resources, VPC endpoint services, Lattice services, or service networks associated with the VPC endpoint.
" + }, + "VpcEndpointAssociationSet":{ + "type":"list", + "member":{ + "shape":"VpcEndpointAssociation", + "locationName":"item" + } + }, "VpcEndpointConnection":{ "type":"structure", "members":{ @@ -63058,7 +64538,9 @@ "enum":[ "Interface", "Gateway", - "GatewayLoadBalancer" + "GatewayLoadBalancer", + "Resource", + "ServiceNetwork" ] }, "VpcFlowLogId":{"type":"string"}, @@ -63797,6 +65279,11 @@ "type":"string", "sensitive":true }, + "maxResults":{ + "type":"integer", + "max":100, + "min":1 + }, "preSharedKey":{ "type":"string", "sensitive":true From 632a5a2e9474b6eccb48dc0139b443fe747f6a24 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:48:20 +0000 Subject: [PATCH 33/35] Amazon Simple Storage Service Update: Amazon S3 introduces support for AWS Dedicated Local Zones --- ...re-AmazonSimpleStorageService-36d4cfc.json | 6 + .../codegen-resources/endpoint-rule-set.json | 528 ++++++++++++++++++ .../codegen-resources/endpoint-tests.json | 483 ++++++++++++++-- .../codegen-resources/service-2.json | 146 ++--- 4 files changed, 1053 insertions(+), 110 deletions(-) create mode 100644 .changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json diff --git a/.changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json b/.changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json new file mode 100644 index 00000000000..3586cd88780 --- /dev/null +++ b/.changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Amazon S3 introduces support for AWS Dedicated Local Zones" +} diff --git a/services/s3/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/s3/src/main/resources/codegen-resources/endpoint-rule-set.json index 1ef9810cdac..6a11be757de 100644 --- a/services/s3/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/s3/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -863,6 +863,270 @@ ], "type": "tree" }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 19, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 19, + 21, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 20, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + 
}, + 20, + 22, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 26, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 26, + 28, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], "error": "Unrecognized S3Express bucket name format.", @@ -1047,6 +1311,270 @@ ], "type": "tree" }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 19, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 19, + 21, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + 
"disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 20, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 20, + 22, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 26, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 26, + 28, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], "error": "Unrecognized S3Express bucket name format.", diff --git a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json index b6575d8e020..b6a69202747 100644 --- a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json @@ -7844,7 +7844,7 @@ } }, { - "documentation": "Data Plane with short AZ", + "documentation": "Data Plane with short zone name", "expect": { "endpoint": { "properties": { @@ -7858,7 +7858,7 @@ ], "backend": "S3Express" }, - "url": "https://mybucket--use1-az1--x-s3.s3express-use1-az1.us-east-1.amazonaws.com" + "url": 
"https://mybucket--abcd-ab1--x-s3.s3express-abcd-ab1.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -7868,14 +7868,14 @@ }, "operationName": "GetObject", "operationParams": { - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", "Key": "key" } } ], "params": { "Region": "us-east-1", - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, @@ -7883,7 +7883,124 @@ } }, { - "documentation": "Data Plane with short AZ fips", + "documentation": "Data Plane with short zone names (13 chars)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test-zone-ab1--x-s3.s3express-test-zone-ab1.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--test-zone-ab1--x-s3", + "Key": "key" + } + } + ], + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test-zone-ab1--x-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false + } + }, + { + "documentation": "Data Plane with medium zone names (14 chars)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-zone-ab1--x-s3.s3express-test1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--test1-zone-ab1--x-s3", + "Key": "key" + } + } + ], + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-zone-ab1--x-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false + } + }, + { + "documentation": "Data Plane with long zone names (20 chars)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-long1-zone-ab1--x-s3.s3express-test1-long1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--test1-long1-zone-ab1--x-s3", + "Key": "key" + } + } + ], + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-long1-zone-ab1--x-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false + } + }, + { + "documentation": "Data Plane with short zone fips", "expect": { "endpoint": { "properties": { @@ -7897,7 +8014,7 @@ ], "backend": "S3Express" }, - "url": "https://mybucket--use1-az1--x-s3.s3express-fips-use1-az1.us-east-1.amazonaws.com" + "url": "https://mybucket--test-ab1--x-s3.s3express-fips-test-ab1.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -7908,14 +8025,134 @@ }, "operationName": "GetObject", "operationParams": { - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "Key": 
"key" } } ], "params": { "Region": "us-east-1", - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false + } + }, + { + "documentation": "Data Plane with short zone (13 chars) fips", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test-zone-ab1--x-s3.s3express-fips-test-zone-ab1.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--test-zone-ab1--x-s3", + "Key": "key" + } + } + ], + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test-zone-ab1--x-s3", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false + } + }, + { + "documentation": "Data Plane with medium zone (14 chars) fips", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-zone-ab1--x-s3.s3express-fips-test1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--test1-zone-ab1--x-s3", + "Key": "key" + } + } + ], + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-zone-ab1--x-s3", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false + } + }, + { + "documentation": "Data Plane with long zone (20 chars) fips", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-long1-zone-ab1--x-s3.s3express-fips-test1-long1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--test1-long1-zone-ab1--x-s3", + "Key": "key" + } + } + ], + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-long1-zone-ab1--x-s3", "UseFIPS": true, "UseDualStack": false, "Accelerate": false, @@ -7931,30 +8168,30 @@ { "name": "sigv4-s3express", "signingName": "s3express", - "signingRegion": "ap-northeast-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ], "backend": "S3Express" }, - "url": "https://mybucket--apne1-az1--x-s3.s3express-apne1-az1.ap-northeast-1.amazonaws.com" + "url": "https://mybucket--test1-az1--x-s3.s3express-test1-az1.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "ap-northeast-1" + "AWS::Region": "us-west-2" }, "operationName": "GetObject", "operationParams": { - "Bucket": "mybucket--apne1-az1--x-s3", + "Bucket": "mybucket--test1-az1--x-s3", "Key": "key" } } ], "params": { - "Region": "ap-northeast-1", - "Bucket": "mybucket--apne1-az1--x-s3", + "Region": "us-west-2", + "Bucket": 
"mybucket--test1-az1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, @@ -7970,31 +8207,31 @@ { "name": "sigv4-s3express", "signingName": "s3express", - "signingRegion": "ap-northeast-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ], "backend": "S3Express" }, - "url": "https://mybucket--apne1-az1--x-s3.s3express-fips-apne1-az1.ap-northeast-1.amazonaws.com" + "url": "https://mybucket--test1-az1--x-s3.s3express-fips-test1-az1.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "ap-northeast-1", + "AWS::Region": "us-west-2", "AWS::UseFIPS": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "mybucket--apne1-az1--x-s3", + "Bucket": "mybucket--test1-az1--x-s3", "Key": "key" } } ], "params": { - "Region": "ap-northeast-1", - "Bucket": "mybucket--apne1-az1--x-s3", + "Region": "us-west-2", + "Bucket": "mybucket--test1-az1--x-s3", "UseFIPS": true, "UseDualStack": false, "Accelerate": false, @@ -8016,7 +8253,7 @@ ], "backend": "S3Express" }, - "url": "https://s3express-control.us-east-1.amazonaws.com/mybucket--use1-az1--x-s3" + "url": "https://s3express-control.us-east-1.amazonaws.com/mybucket--test-ab1--x-s3" } }, "operationInputs": [ @@ -8026,13 +8263,13 @@ }, "operationName": "CreateBucket", "operationParams": { - "Bucket": "mybucket--use1-az1--x-s3" + "Bucket": "mybucket--test-ab1--x-s3" } } ], "params": { "Region": "us-east-1", - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, @@ -8055,7 +8292,7 @@ ], "backend": "S3Express" }, - "url": "https://s3express-control-fips.us-east-1.amazonaws.com/mybucket--use1-az1--x-s3" + "url": "https://s3express-control-fips.us-east-1.amazonaws.com/mybucket--test-ab1--x-s3" } }, "operationInputs": [ @@ -8066,13 +8303,13 @@ }, "operationName": "CreateBucket", "operationParams": { - "Bucket": "mybucket--use1-az1--x-s3" + "Bucket": "mybucket--test-ab1--x-s3" } } ], "params": { "Region": "us-east-1", - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "UseFIPS": true, "UseDualStack": false, "Accelerate": false, @@ -8178,6 +8415,33 @@ "DisableS3ExpressSessionAuth": true } }, + { + "documentation": "Data Plane sigv4 auth with short zone (13 chars)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test-zone-ab1--x-s3.s3express-test-zone-ab1.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test-zone-ab1--x-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "DisableS3ExpressSessionAuth": true + } + }, { "documentation": "Data Plane sigv4 auth with short AZ fips", "expect": { @@ -8205,6 +8469,33 @@ "DisableS3ExpressSessionAuth": true } }, + { + "documentation": "Data Plane sigv4 auth with short zone (13 chars) fips", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test-zone-ab1--x-s3.s3express-fips-test-zone-ab1.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test-zone-ab1--x-s3", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": 
false, + "DisableS3ExpressSessionAuth": true + } + }, { "documentation": "Data Plane sigv4 auth with long AZ", "expect": { @@ -8214,18 +8505,74 @@ { "name": "sigv4", "signingName": "s3express", - "signingRegion": "ap-northeast-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ], "backend": "S3Express" }, - "url": "https://mybucket--apne1-az1--x-s3.s3express-apne1-az1.ap-northeast-1.amazonaws.com" + "url": "https://mybucket--test1-az1--x-s3.s3express-test1-az1.us-west-2.amazonaws.com" } }, "params": { - "Region": "ap-northeast-1", - "Bucket": "mybucket--apne1-az1--x-s3", + "Region": "us-west-2", + "Bucket": "mybucket--test1-az1--x-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true + } + }, + { + "documentation": "Data Plane sigv4 auth with medium zone(14 chars)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-zone-ab1--x-s3.s3express-test1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-zone-ab1--x-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true + } + }, + { + "documentation": "Data Plane sigv4 auth with long zone(20 chars)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-long1-zone-ab1--x-s3.s3express-test1-long1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-long1-zone-ab1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, @@ -8242,18 +8589,74 @@ { "name": "sigv4", "signingName": "s3express", - "signingRegion": "ap-northeast-1", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-az1--x-s3.s3express-fips-test1-az1.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-az1--x-s3", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true + } + }, + { + "documentation": "Data Plane sigv4 auth with medium zone (14 chars) fips", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--test1-zone-ab1--x-s3.s3express-fips-test1-zone-ab1.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "Bucket": "mybucket--test1-zone-ab1--x-s3", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true + } + }, + { + "documentation": "Data Plane sigv4 auth with long zone (20 chars) fips", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ], "backend": "S3Express" }, - "url": 
"https://mybucket--apne1-az1--x-s3.s3express-fips-apne1-az1.ap-northeast-1.amazonaws.com" + "url": "https://mybucket--test1-long1-zone-ab1--x-s3.s3express-fips-test1-long1-zone-ab1.us-west-2.amazonaws.com" } }, "params": { - "Region": "ap-northeast-1", - "Bucket": "mybucket--apne1-az1--x-s3", + "Region": "us-west-2", + "Bucket": "mybucket--test1-long1-zone-ab1--x-s3", "UseFIPS": true, "UseDualStack": false, "Accelerate": false, @@ -8493,14 +8896,14 @@ }, "operationName": "GetObject", "operationParams": { - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "Key": "key" } } ], "params": { "Region": "us-east-1", - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "UseFIPS": false, "UseDualStack": true, "Accelerate": false, @@ -8520,14 +8923,14 @@ }, "operationName": "GetObject", "operationParams": { - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "Key": "key" } } ], "params": { "Region": "us-east-1", - "Bucket": "mybucket--use1-az1--x-s3", + "Bucket": "mybucket--test-ab1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": true, @@ -8546,14 +8949,14 @@ }, "operationName": "GetObject", "operationParams": { - "Bucket": "my.bucket--use1-az1--x-s3", + "Bucket": "my.bucket--test-ab1--x-s3", "Key": "key" } } ], "params": { "Region": "us-east-1", - "Bucket": "my.bucket--use1-az1--x-s3", + "Bucket": "my.bucket--test-ab1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index d81fdbe0c6c..c92ba74cd2f 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
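The abort-then-verify flow described here looks roughly like the following with the existing S3Client; the bucket, key, and upload ID are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListPartsResponse;
import software.amazon.awssdk.services.s3.model.NoSuchUploadException;

public class AbortAndVerify {
    public static void main(String[] args) {
        String bucket = "amzn-s3-demo-bucket";   // placeholder
        String key = "large-object";             // placeholder
        String uploadId = "EXAMPLE-UPLOAD-ID";   // placeholder

        try (S3Client s3 = S3Client.create()) {
            s3.abortMultipartUpload(b -> b.bucket(bucket).key(key).uploadId(uploadId));

            try {
                ListPartsResponse parts = s3.listParts(b -> b.bucket(bucket).key(key).uploadId(uploadId));
                System.out.println("Parts still present: " + parts.parts().size());
            } catch (NoSuchUploadException e) {
                // The upload no longer exists, so no parts remain and no further part storage is billed.
                System.out.println("Upload fully aborted.");
            }
        }
    }
}
```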
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
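A minimal sketch of checking which hostname the new rules resolve for a directory bucket whose zone ID uses the longer Dedicated Local Zone format; the bucket name and expected URL are taken from the endpoint test cases earlier in this patch, and S3EndpointProvider and S3EndpointParams are the SDK's generated endpoint provider types.

```java
import software.amazon.awssdk.endpoints.Endpoint;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.endpoints.S3EndpointParams;
import software.amazon.awssdk.services.s3.endpoints.S3EndpointProvider;

public class ResolveDirectoryBucketEndpoint {
    public static void main(String[] args) {
        S3EndpointProvider provider = S3EndpointProvider.defaultProvider();

        // A directory bucket whose zone ID segment is 13 characters long, as in the tests above.
        Endpoint endpoint = provider.resolveEndpoint(S3EndpointParams.builder()
                        .bucket("mybucket--test-zone-ab1--x-s3")
                        .region(Region.US_WEST_2)
                        .build())
                .join();

        // Expected per the new rules:
        // https://mybucket--test-zone-ab1--x-s3.s3express-test-zone-ab1.us-west-2.amazonaws.com
        System.out.println(endpoint.url());
    }
}
```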
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload
operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber
value and the ETag
value that are returned after that part was uploaded.
The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK
response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK
response has been sent. This means that a 200 OK
response can contain either a success or an error. The error response might be embedded in the 200 OK
response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.
You can't use Content-Type: application/x-www-form-urlencoded
for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
header, CompleteMultipartUpload
can still return a 200 OK
response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
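The parts-list requirement above maps onto the existing CompletedMultipartUpload and CompletedPart types; in this sketch the bucket, key, upload ID, and ETags are placeholders that would normally come from CreateMultipartUpload and the individual UploadPart responses.

```java
import java.util.Arrays;
import java.util.List;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
import software.amazon.awssdk.services.s3.model.CompletedPart;

public class CompleteUpload {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // ETags are placeholders; in practice, collect them from each UploadPart response.
            List<CompletedPart> parts = Arrays.asList(
                    CompletedPart.builder().partNumber(1).eTag("\"etag-of-part-1\"").build(),
                    CompletedPart.builder().partNumber(2).eTag("\"etag-of-part-2\"").build());

            s3.completeMultipartUpload(b -> b
                    .bucket("amzn-s3-demo-bucket")        // placeholder
                    .key("large-object")                  // placeholder
                    .uploadId("EXAMPLE-UPLOAD-ID")        // placeholder
                    .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build()));
        }
    }
}
```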
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If you provide an additional checksum value in your MultipartUpload
requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt
action for the CompleteMultipartUpload
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Error Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CompleteMultipartUpload
:
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload
operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber
value and the ETag
value that are returned after that part was uploaded.
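For illustration only, a minimal AWS SDK for Java 2.x sketch of completing a multipart upload might look like the following; the bucket name, key, upload ID, and ETag values are placeholders that your application would supply from earlier CreateMultipartUpload and UploadPart responses:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
    import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
    import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
    import software.amazon.awssdk.services.s3.model.CompletedPart;

    public class CompleteMultipartUploadExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            // Each part number and ETag must match a part previously uploaded with UploadPart.
            CompletedPart part1 = CompletedPart.builder().partNumber(1).eTag("\"etag-of-part-1\"").build();
            CompletedPart part2 = CompletedPart.builder().partNumber(2).eTag("\"etag-of-part-2\"").build();
            CompleteMultipartUploadResponse response = s3.completeMultipartUpload(
                    CompleteMultipartUploadRequest.builder()
                            .bucket("amzn-s3-demo-bucket")   // placeholder bucket name
                            .key("my-large-object")          // placeholder object key
                            .uploadId("example-upload-id")   // placeholder upload ID from CreateMultipartUpload
                            .multipartUpload(CompletedMultipartUpload.builder()
                                    .parts(part1, part2)     // parts listed in ascending part-number order
                                    .build())
                            .build());
            System.out.println("Assembled object ETag: " + response.eTag());
        }
    }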
The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK
response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK
response has been sent. This means that a 200 OK
response can contain either a success or an error. The error response might be embedded in the 200 OK
response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, the SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.
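Retries of transient failures are normally driven by the SDK client's retry configuration rather than hand-written loops; as a rough sketch that is not prescribed by this patch, you might select the standard retry mode on the client like this:

    import software.amazon.awssdk.core.retry.RetryMode;
    import software.amazon.awssdk.services.s3.S3Client;

    public class RetryConfiguredClient {
        public static void main(String[] args) {
            // Standard retry mode retries throttling and transient 5xx errors with backoff;
            // which mode suits your workload is a judgment call, not mandated here.
            S3Client s3 = S3Client.builder()
                    .overrideConfiguration(o -> o.retryPolicy(RetryMode.STANDARD))
                    .build();
            System.out.println("Client configured for: " + s3.serviceName());
        }
    }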
You can't use Content-Type: application/x-www-form-urlencoded
for CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
header, CompleteMultipartUpload
can still return a 200 OK
response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If you provide an additional checksum value in your MultipartUpload
requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt
action for the CompleteMultipartUpload
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Error Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CompleteMultipartUpload
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
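For objects larger than 5 GB, the copy must be performed in parts. As a hedged AWS SDK for Java 2.x sketch (bucket names, keys, upload ID, and byte range are placeholders), copying a single part with UploadPartCopy might look like this:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
    import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse;

    public class UploadPartCopyExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            // Copy the first 5 GiB of the source object as part 1 of an existing multipart upload.
            UploadPartCopyResponse response = s3.uploadPartCopy(UploadPartCopyRequest.builder()
                    .sourceBucket("amzn-s3-demo-source-bucket")           // placeholder
                    .sourceKey("large-source-object")                     // placeholder
                    .destinationBucket("amzn-s3-demo-destination-bucket") // placeholder
                    .destinationKey("large-destination-object")           // placeholder
                    .uploadId("example-upload-id")                        // from CreateMultipartUpload
                    .partNumber(1)
                    .copySourceRange("bytes=0-5368709119")
                    .build());
            // Keep the ETag; it is required when you later call CompleteMultipartUpload.
            System.out.println("Copied part ETag: " + response.copyPartResult().eTag());
        }
    }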
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response doesn't contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-Region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, the SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different Region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
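As a rough illustration, a single-request copy with the AWS SDK for Java 2.x might look like the following (bucket and key names are placeholders); this path is only suitable for objects up to 5 GB:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
    import software.amazon.awssdk.services.s3.model.CopyObjectResponse;

    public class CopyObjectExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            CopyObjectResponse response = s3.copyObject(CopyObjectRequest.builder()
                    .sourceBucket("amzn-s3-demo-source-bucket")           // placeholder
                    .sourceKey("source-object-key")                       // placeholder
                    .destinationBucket("amzn-s3-demo-destination-bucket") // placeholder
                    .destinationKey("destination-object-key")             // placeholder
                    .build());
            // The SDK surfaces an error embedded in a 200 OK response as an exception,
            // so reaching this line means the copy succeeded.
            System.out.println("Copied object ETag: " + response.copyObjectResult().eTag());
        }
    }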
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response doesn't contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-Region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, the SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different Region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CopyObject
:
This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket
.
Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.
General purpose buckets - If you send your CreateBucket
request to the s3.amazonaws.com
global endpoint, the request goes to the us-east-1
Region. So the signature calculations in Signature Version 4 must use us-east-1
as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle a 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - In addition to the s3:CreateBucket
permission, the following permissions are required in a policy when your CreateBucket
request includes specific headers:
Access control lists (ACLs) - In your CreateBucket
request, if you specify an access control list (ACL) and set it to public-read
, public-read-write
, authenticated-read
, or if you explicitly specify any other custom ACLs, both s3:CreateBucket
and s3:PutBucketAcl
permissions are required. In your CreateBucket
request, if you set the ACL to private
, or if you don't specify any ACLs, only the s3:CreateBucket
permission is required.
Object Lock - In your CreateBucket
request, if you set x-amz-bucket-object-lock-enabled
to true, the s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the s3:PutBucketOwnershipControls
permission is required.
To set an ACL on a bucket as part of a CreateBucket
request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced
. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl
to set the ACL. If you try to create a bucket with a public ACL, the request will fail.
For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock
API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.
For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to CreateBucket
:
This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket
.
Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.
General purpose buckets - If you send your CreateBucket
request to the s3.amazonaws.com
global endpoint, the request goes to the us-east-1
Region. So the signature calculations in Signature Version 4 must use us-east-1
as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle a 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.
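To sketch how this looks with the AWS SDK for Java 2.x (the bucket name and Region are placeholder choices), a bucket outside US East (N. Virginia) is created with an explicit location constraint and then waited on until it exists:

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.BucketLocationConstraint;
    import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration;
    import software.amazon.awssdk.services.s3.model.CreateBucketRequest;

    public class CreateBucketExample {
        public static void main(String[] args) {
            String bucket = "amzn-s3-demo-bucket"; // placeholder; bucket names are globally unique
            S3Client s3 = S3Client.builder().region(Region.EU_WEST_1).build();
            s3.createBucket(CreateBucketRequest.builder()
                    .bucket(bucket)
                    .createBucketConfiguration(CreateBucketConfiguration.builder()
                            .locationConstraint(BucketLocationConstraint.EU_WEST_1)
                            .build())
                    .build());
            // Block until the new bucket is available before using it.
            s3.waiter().waitUntilBucketExists(b -> b.bucket(bucket));
            System.out.println("Created bucket " + bucket);
        }
    }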
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - In addition to the s3:CreateBucket
permission, the following permissions are required in a policy when your CreateBucket
request includes specific headers:
Access control lists (ACLs) - In your CreateBucket
request, if you specify an access control list (ACL) and set it to public-read
, public-read-write
, authenticated-read
, or if you explicitly specify any other custom ACLs, both s3:CreateBucket
and s3:PutBucketAcl
permissions are required. In your CreateBucket
request, if you set the ACL to private
, or if you don't specify any ACLs, only the s3:CreateBucket
permission is required.
Object Lock - In your CreateBucket
request, if you set x-amz-bucket-object-lock-enabled
to true, the s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the s3:PutBucketOwnershipControls
permission is required.
To set an ACL on a bucket as part of a CreateBucket
request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced
. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl
to set the ACL. If you try to create a bucket with a public ACL, the request will fail.
For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock
API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.
For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to CreateBucket
:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload
request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to protect the data.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey*
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.
All GET
and PUT
requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. Overriding the encryption settings values in the CreateSession
request isn't supported. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CreateMultipartUpload
:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
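A minimal AWS SDK for Java 2.x sketch of initiating the upload and uploading a first part might look like this (the bucket, key, and part data are placeholders); see the CompleteMultipartUpload sketch earlier for assembling the parts afterward:

    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
    import software.amazon.awssdk.services.s3.model.UploadPartRequest;
    import software.amazon.awssdk.services.s3.model.UploadPartResponse;

    public class CreateMultipartUploadExample {
        public static void main(String[] args) {
            String bucket = "amzn-s3-demo-bucket"; // placeholder
            String key = "my-large-object";        // placeholder
            S3Client s3 = S3Client.create();

            // Initiate the upload; the returned upload ID ties all later part uploads together.
            CreateMultipartUploadResponse created = s3.createMultipartUpload(
                    CreateMultipartUploadRequest.builder().bucket(bucket).key(key).build());
            String uploadId = created.uploadId();

            // Upload part 1 (every part except the last must be at least 5 MB).
            byte[] partData = new byte[5 * 1024 * 1024]; // placeholder content
            UploadPartResponse part1 = s3.uploadPart(
                    UploadPartRequest.builder()
                            .bucket(bucket).key(key)
                            .uploadId(uploadId)
                            .partNumber(1)
                            .build(),
                    RequestBody.fromBytes(partData));

            // Keep each part's number and ETag for the final CompleteMultipartUpload call.
            System.out.println("Part 1 ETag: " + part1.eTag());
        }
    }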
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload
request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to protect the data.
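In the AWS SDK for Java 2.x, these SSE-KMS settings map to builder properties on the request rather than raw headers; a rough sketch (the bucket, key, and KMS key ARN are placeholders) might look like the following:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

    public class SseKmsMultipartUploadExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            CreateMultipartUploadResponse created = s3.createMultipartUpload(
                    CreateMultipartUploadRequest.builder()
                            .bucket("amzn-s3-demo-bucket")  // placeholder
                            .key("my-encrypted-object")     // placeholder
                            // Sent as x-amz-server-side-encryption: aws:kms
                            .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                            // Sent as x-amz-server-side-encryption-aws-kms-key-id; omit it to use the aws/s3 managed key
                            .ssekmsKeyId("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID") // placeholder ARN
                            .build());
            System.out.println("Upload ID: " + created.uploadId());
        }
    }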
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey*
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.
All GET
and PUT
requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.
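With the AWS SDK for Java 2.x, the three SSE-C headers also map to builder properties; a rough sketch follows, in which the key material is randomly generated purely for illustration and the bucket and key names are placeholders:

    import java.security.MessageDigest;
    import java.security.SecureRandom;
    import java.util.Base64;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;

    public class SseCMultipartUploadExample {
        public static void main(String[] args) throws Exception {
            // Generate a 256-bit key for illustration; in practice you manage and store this key yourself.
            byte[] key = new byte[32];
            new SecureRandom().nextBytes(key);
            String keyB64 = Base64.getEncoder().encodeToString(key);
            String keyMd5B64 = Base64.getEncoder().encodeToString(MessageDigest.getInstance("MD5").digest(key));

            S3Client s3 = S3Client.create();
            CreateMultipartUploadResponse created = s3.createMultipartUpload(
                    CreateMultipartUploadRequest.builder()
                            .bucket("amzn-s3-demo-bucket")  // placeholder
                            .key("my-sse-c-object")         // placeholder
                            .sseCustomerAlgorithm("AES256") // x-amz-server-side-encryption-customer-algorithm
                            .sseCustomerKey(keyB64)         // x-amz-server-side-encryption-customer-key
                            .sseCustomerKeyMD5(keyMd5B64)   // x-amz-server-side-encryption-customer-key-MD5
                            .build());
            // The same key headers must be sent on each UploadPart request for this upload.
            System.out.println("Upload ID: " + created.uploadId());
        }
    }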
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. Overriding the encryption settings values in the CreateSession
request isn't supported. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CreateMultipartUpload
:
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide.
To make Zonal endpoint API requests on a directory bucket, use the CreateSession
API operation. Specifically, you grant s3express:CreateSession
permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession
API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token
request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession
API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval.
If you use the Amazon Web Services SDKs, the SDKs handle session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
CopyObject
API operation - Unlike other Zonal endpoint API operations, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
HeadBucket
API operation - Unlike other Zonal endpoint API operations, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession
permission to the bucket. In a policy, you can have the s3express:SessionMode
condition key to control who can create a ReadWrite
or ReadOnly
session. For more information about ReadWrite
or ReadOnly
sessions, see x-amz-create-session-mode
. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.
Only one customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) from the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. Overriding the encryption settings values in the CreateSession
request isn't supported. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), you can't override the values of the encryption settings from the CreateSession
request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide.
To make Zonal endpoint API requests on a directory bucket, use the CreateSession
API operation. Specifically, you grant s3express:CreateSession
permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession
API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token
request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession
API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval.
If you use the Amazon Web Services SDKs, the SDKs handle session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
CopyObject
API operation - Unlike other Zonal endpoint API operations, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
HeadBucket
API operation - Unlike other Zonal endpoint API operations, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession
permission to the bucket. In a policy, you can have the s3express:SessionMode
condition key to control who can create a ReadWrite
or ReadOnly
session. For more information about ReadWrite
or ReadOnly
sessions, see x-amz-create-session-mode
. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.
Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) from the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. Overriding the encryption settings values in the CreateSession request isn't supported. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), overriding the values of the encryption settings from the CreateSession request isn't supported.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
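As an illustration only, the following AWS SDK for Java 2.x sketch calls the CreateSession API directly on a hypothetical directory bucket. In practice the SDK creates and refreshes sessions for you when you call Zonal endpoint operations, so an explicit call like this is rarely needed; the bucket name and Region are placeholders, and the SessionMode and credentials accessors shown are assumptions based on the S3 model.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CreateSessionRequest;
    import software.amazon.awssdk.services.s3.model.CreateSessionResponse;
    import software.amazon.awssdk.services.s3.model.SessionMode;

    public class CreateSessionSketch {
        public static void main(String[] args) {
            // Placeholder directory bucket name and Region.
            String bucket = "amzn-s3-demo-bucket--usw2-az1--x-s3";
            try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
                CreateSessionResponse session = s3.createSession(CreateSessionRequest.builder()
                        .bucket(bucket)
                        .sessionMode(SessionMode.READ_WRITE) // or SessionMode.READ_ONLY
                        .build());
                // Temporary credentials are scoped to the bucket and expire after about 5 minutes.
                System.out.println("Session expires at: " + session.credentials().expiration());
            }
        }
    }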
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the s3:DeleteBucket
permission on the specified bucket in a policy.
Directory bucket permissions - You must have the s3express:DeleteBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to DeleteBucket
:
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the s3:DeleteBucket
permission on the specified bucket in a policy.
Directory bucket permissions - You must have the s3express:DeleteBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucket
:
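A minimal AWS SDK for Java 2.x sketch of calling DeleteBucket, assuming a placeholder bucket name and that the bucket is already empty (no objects, versions, or delete markers, and no in-progress multipart uploads for directory buckets).

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteBucketRequest;

    public class DeleteBucketSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Placeholder bucket name; the bucket must already be empty.
                s3.deleteBucket(DeleteBucketRequest.builder()
                        .bucket("amzn-s3-demo-bucket")
                        .build());
            }
        }
    }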
This implementation of the DELETE action resets the default encryption for the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets.
General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to DeleteBucketEncryption
:
This implementation of the DELETE action resets the default encryption for the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets.
General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucketEncryption
:
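As a sketch only, the following AWS SDK for Java 2.x call removes a bucket's default encryption configuration, which resets it to SSE-S3. The bucket name is a placeholder, and the caller is assumed to have the required encryption-configuration permission described above.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionRequest;

    public class DeleteBucketEncryptionSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // After this call, the bucket falls back to SSE-S3 default encryption.
                s3.deleteBucketEncryption(DeleteBucketEncryptionRequest.builder()
                        .bucket("amzn-s3-demo-bucket")
                        .build());
            }
        }
    }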
Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
For more information about the object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", + "documentation":"Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
For more information about the object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
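A minimal AWS SDK for Java 2.x sketch of removing a bucket's lifecycle configuration; the bucket name is a placeholder, and the caller is assumed to have the lifecycle-configuration permission described above.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleRequest;

    public class DeleteBucketLifecycleSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Removes all lifecycle rules; objects are no longer expired automatically.
                s3.deleteBucketLifecycle(DeleteBucketLifecycleRequest.builder()
                        .bucket("amzn-s3-demo-bucket")
                        .build());
            }
        }
    }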
", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -236,7 +236,7 @@ }, "input":{"shape":"DeleteBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", - "documentation":"Deletes the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:DeleteBucketPolicy
permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to DeleteBucketPolicy
Deletes the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:DeleteBucketPolicy
permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucketPolicy
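A short AWS SDK for Java 2.x sketch of deleting a bucket policy, assuming a placeholder bucket name and an identity that belongs to the bucket owner's account and has the DeleteBucketPolicy permission.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest;

    public class DeleteBucketPolicySketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Removes the bucket policy attached to the placeholder bucket.
                s3.deleteBucketPolicy(DeleteBucketPolicyRequest.builder()
                        .bucket("amzn-s3-demo-bucket")
                        .build());
            }
        }
    }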
Removes an object from a bucket. The behavior depends on the bucket's versioning state. For more information, see Best practices to consider before deleting an object.
To remove a specific version, you must use the versionId
query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS. For more information about MFA delete and to see example requests, see Using MFA delete and Sample request in the Amazon S3 User Guide.
S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always have the s3:DeleteObject
permission.
You can also use PutBucketLifecycle to delete objects in Amazon S3.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion
permission.
If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
permissions.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following action is related to DeleteObject
:
Removes an object from a bucket. The behavior depends on the bucket's versioning state:
If bucket versioning is not enabled, the operation permanently deletes the object.
If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId
in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.
If bucket versioning is suspended, the operation removes the object that has a null versionId
, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId
, and all versions of the object have a versionId
, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId
, you must include the object’s versionId
in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
To remove a specific version, you must use the versionId
query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.
Directory buckets - MFA delete is not supported by directory buckets.
You can delete objects by explicitly calling DELETE Object or by configuring a lifecycle rule (PutBucketLifecycle) so that Amazon S3 removes them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
actions.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always have the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following action is related to DeleteObject
:
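The following AWS SDK for Java 2.x sketch deletes an object and, for a versioning-enabled general purpose bucket, shows how a specific version can be removed permanently. Bucket, key, and version ID values are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
    import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;

    public class DeleteObjectSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Without a version ID, a versioning-enabled bucket receives a delete marker instead.
                DeleteObjectResponse response = s3.deleteObject(DeleteObjectRequest.builder()
                        .bucket("amzn-s3-demo-bucket")              // placeholder bucket
                        .key("photos/2006/February/sample.jpg")     // placeholder key
                        .versionId("placeholder-version-id")        // omit to act on the current version
                        .build());
                System.out.println("Delete marker created: " + response.deleteMarker());
            }
        }
    }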
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
When you perform this action on an MFA Delete enabled bucket and attempt to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, the entire Multi-Object Delete request will fail, whether or not there are versioned keys in the request. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to DeleteObjects
:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
When you perform this action on an MFA Delete enabled bucket and attempt to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, the entire Multi-Object Delete request will fail, whether or not there are versioned keys in the request. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to DeleteObjects
:
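A hedged AWS SDK for Java 2.x sketch of a Multi-Object Delete request in quiet mode; the bucket name and keys are placeholders, and the SDK is expected to supply the required request-body checksum header for you.

    import java.util.Arrays;
    import java.util.List;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.Delete;
    import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
    import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
    import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

    public class DeleteObjectsSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                List<ObjectIdentifier> keys = Arrays.asList(
                        ObjectIdentifier.builder().key("logs/2024/01/01.gz").build(),
                        ObjectIdentifier.builder().key("logs/2024/01/02.gz").build());
                DeleteObjectsResponse response = s3.deleteObjects(DeleteObjectsRequest.builder()
                        .bucket("amzn-s3-demo-bucket")
                        .delete(Delete.builder().objects(keys).quiet(true).build())
                        .build());
                // In quiet mode, only failures are reported back.
                response.errors().forEach(e -> System.out.println(e.key() + ": " + e.message()));
            }
        }
    }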
Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets.
General purpose bucket permissions - The s3:GetEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to GetBucketEncryption
:
Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets.
General purpose bucket permissions - The s3:GetEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to GetBucketEncryption
:
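A small AWS SDK for Java 2.x sketch that reads a bucket's default encryption configuration and prints the algorithm of each rule; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest;
    import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

    public class GetBucketEncryptionSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                GetBucketEncryptionResponse response = s3.getBucketEncryption(
                        GetBucketEncryptionRequest.builder().bucket("amzn-s3-demo-bucket").build());
                for (ServerSideEncryptionRule rule : response.serverSideEncryptionConfiguration().rules()) {
                    // Typically AES256 (SSE-S3) or aws:kms (SSE-KMS).
                    System.out.println(rule.applyServerSideEncryptionByDefault().sseAlgorithmAsString());
                }
            }
        }
    }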
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API, which is compatible with the new functionality. The previous version of the API supported filtering based only on an object key name prefix, which is supported for general purpose buckets for backward compatibility. For the related API description, see GetBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiring versioned objects, transitions, and tag filters are not supported.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:GetLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:GetLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API, which is compatible with the new functionality. The previous version of the API supported filtering based only on an object key name prefix, which is supported for general purpose buckets for backward compatibility. For the related API description, see GetBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiring versioned objects, transitions, and tag filters are not supported.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:GetLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:GetLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
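The following AWS SDK for Java 2.x sketch retrieves a bucket's lifecycle rules and handles the NoSuchLifecycleConfiguration error code when no configuration exists; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationResponse;
    import software.amazon.awssdk.services.s3.model.S3Exception;

    public class GetBucketLifecycleSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                try {
                    GetBucketLifecycleConfigurationResponse response = s3.getBucketLifecycleConfiguration(
                            GetBucketLifecycleConfigurationRequest.builder().bucket("amzn-s3-demo-bucket").build());
                    response.rules().forEach(rule -> System.out.println(rule.id() + ": " + rule.statusAsString()));
                } catch (S3Exception e) {
                    // The service reports a missing configuration with this error code and HTTP 404.
                    if ("NoSuchLifecycleConfiguration".equals(e.awsErrorDetails().errorCode())) {
                        System.out.println("No lifecycle configuration is set on this bucket.");
                    } else {
                        throw e;
                    }
                }
            }
        }
    }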
Returns the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:GetBucketPolicy
permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following action is related to GetBucketPolicy
:
Returns the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:GetBucketPolicy
permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following action is related to GetBucketPolicy
:
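A minimal AWS SDK for Java 2.x sketch that fetches a bucket policy as a JSON string; the bucket name is a placeholder, and the caller is assumed to belong to the bucket owner's account with the GetBucketPolicy permission.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest;

    public class GetBucketPolicySketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                String policyJson = s3.getBucketPolicy(GetBucketPolicyRequest.builder()
                                .bucket("amzn-s3-demo-bucket")
                                .build())
                        .policy();
                System.out.println(policyJson); // the policy document as JSON text
            }
        }
    }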
Retrieves an object from Amazon S3.
In the GetObject
request, specify the full key name for the object.
General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the object key name as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the object key name as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.
Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket--use1-az5--x-s3
, specify the object key name as /photos/2006/February/sample.jpg
. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject
, you must have the READ
access to the object (or version). If you grant READ
access to the anonymous user, the GetObject
operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.
If you include a versionId
in your request header, you must have the s3:GetObjectVersion
permission to access a specific version of an object. The s3:GetObject
permission is not required in this scenario.
If you request the current version of an object without a specific versionId
in the request header, only the s3:GetObject
permission is required. The s3:GetObjectVersion
permission is not required in this scenario.
If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Access Denied
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState
error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request
.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GetObject
requests if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
There are times when you want to override certain response header values of a GetObject
response. For example, you might override the Content-Disposition
response header value through your GetObject
request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK
is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject
response are Cache-Control
, Content-Disposition
, Content-Encoding
, Content-Language
, Content-Type
, and Expires
.
To override values for a set of response headers in the GetObject
response, you can use the following query parameters in the request.
response-cache-control
response-content-disposition
response-content-encoding
response-content-language
response-content-type
response-expires
When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to GetObject
:
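For illustration, a minimal AWS SDK for Java 2.x sketch of a GetObject call that streams the object body to a local file. The bucket name, object key, Region, and output path are placeholder assumptions, not values taken from this document.

import java.nio.file.Paths;
import software.amazon.awssdk.core.sync.ResponseTransformer;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;

public class GetObjectExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            GetObjectRequest request = GetObjectRequest.builder()
                    .bucket("amzn-s3-demo-bucket")              // placeholder bucket name
                    .key("photos/2006/February/sample.jpg")     // full key name, as described above
                    .build();
            // Stream the object body straight to a local file.
            GetObjectResponse response =
                    s3.getObject(request, ResponseTransformer.toFile(Paths.get("sample.jpg")));
            System.out.println("Downloaded " + response.contentLength() + " bytes, ETag " + response.eTag());
        }
    }
}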
Retrieves an object from Amazon S3.
In the GetObject
request, specify the full key name for the object.
General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the object key name as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the object key name as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.
Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket--use1-az5--x-s3
, specify the object key name as /photos/2006/February/sample.jpg
. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject
, you must have READ
access to the object (or version). If you grant READ
access to the anonymous user, the GetObject
operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.
If you include a versionId
in your request header, you must have the s3:GetObjectVersion
permission to access a specific version of an object. The s3:GetObject
permission is not required in this scenario.
If you request the current version of an object without a specific versionId
in the request header, only the s3:GetObject
permission is required. The s3:GetObjectVersion
permission is not required in this scenario.
If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Access Denied
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object, you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState
error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request
.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GetObject
requests if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
There are times when you want to override certain response header values of a GetObject
response. For example, you might override the Content-Disposition
response header value through your GetObject
request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK
is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject
response are Cache-Control
, Content-Disposition
, Content-Encoding
, Content-Language
, Content-Type
, and Expires
.
To override values for a set of response headers in the GetObject
response, you can use the following query parameters in the request.
response-cache-control
response-content-disposition
response-content-encoding
response-content-language
response-content-type
response-expires
When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to GetObject
:
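As a sketch of the response-header overrides described above, the following presigns a GetObject request with the AWS SDK for Java 2.x; the responseContentDisposition, responseCacheControl, and responseContentType builder methods correspond to the response-* query parameters, and the presigned URL satisfies the signing requirement. The bucket name, key, and expiration are placeholder assumptions.

import java.time.Duration;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest;

public class GetObjectResponseOverrides {
    public static void main(String[] args) {
        try (S3Presigner presigner = S3Presigner.create()) {
            GetObjectRequest getObject = GetObjectRequest.builder()
                    .bucket("amzn-s3-demo-bucket")                // placeholder bucket name
                    .key("photos/2006/February/sample.jpg")
                    // These builder methods are sent as the response-* query parameters.
                    .responseContentDisposition("attachment; filename=\"sample.jpg\"")
                    .responseCacheControl("no-cache")
                    .responseContentType("image/jpeg")
                    .build();
            PresignedGetObjectRequest presigned = presigner.presignGetObject(
                    GetObjectPresignRequest.builder()
                            .signatureDuration(Duration.ofMinutes(15))
                            .getObjectRequest(getObject)
                            .build());
            // The overrides are honored because the URL is signed.
            System.out.println(presigned.url());
        }
    }
}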
Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
GetObjectAttributes
combines the functionality of HeadObject
and ListParts
. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes
.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - To use GetObjectAttributes
, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion
and s3:GetObjectVersionAttributes
permissions for this operation. If the bucket is not versioned, you need the s3:GetObject
and s3:GetObjectAttributes
permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
("no such key") error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
("access denied") error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a GET
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Consider the following when using request headers:
If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK
and the data requested:
If-Match
condition evaluates to true
.
If-Unmodified-Since
condition evaluates to false
.
For more information about conditional requests, see RFC 7232.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified
:
If-None-Match
condition evaluates to false
.
If-Modified-Since
condition evaluates to true
.
For more information about conditional requests, see RFC 7232.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following actions are related to GetObjectAttributes
:
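A minimal AWS SDK for Java 2.x sketch of a GetObjectAttributes call that requests a couple of attributes in a single round trip; the bucket name, key, and Region are placeholder assumptions.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
import software.amazon.awssdk.services.s3.model.ObjectAttributes;

public class GetObjectAttributesExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            GetObjectAttributesResponse attrs = s3.getObjectAttributes(GetObjectAttributesRequest.builder()
                    .bucket("amzn-s3-demo-bucket")              // placeholder bucket name
                    .key("photos/2006/February/sample.jpg")
                    // Only the requested attributes are populated in the response.
                    .objectAttributes(ObjectAttributes.OBJECT_SIZE, ObjectAttributes.STORAGE_CLASS)
                    .build());
            System.out.println("Size: " + attrs.objectSize());
            System.out.println("Storage class: " + attrs.storageClassAsString());
        }
    }
}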
Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
GetObjectAttributes
combines the functionality of HeadObject
and ListParts
. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes
.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - To use GetObjectAttributes
, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion
and s3:GetObjectVersionAttributes
permissions for this operation. If the bucket is not versioned, you need the s3:GetObject
and s3:GetObjectAttributes
permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
("no such key") error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
("access denied") error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a GET
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Consider the following when using request headers:
If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK
and the data requested:
If-Match
condition evaluates to true
.
If-Unmodified-Since
condition evaluates to false
.
For more information about conditional requests, see RFC 7232.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified
:
If-None-Match
condition evaluates to false
.
If-Modified-Since
condition evaluates to true
.
For more information about conditional requests, see RFC 7232.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following actions are related to GetObjectAttributes
:
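As a further sketch, requesting the ObjectParts attribute returns part-level details for an object that was uploaded with multipart upload; the bucket name and key are placeholder assumptions, and the objectParts() accessor is only populated when that attribute is requested and the object has parts.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
import software.amazon.awssdk.services.s3.model.ObjectAttributes;

public class GetObjectPartsExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            GetObjectAttributesResponse attrs = s3.getObjectAttributes(GetObjectAttributesRequest.builder()
                    .bucket("amzn-s3-demo-bucket")      // placeholder bucket name
                    .key("large-upload.bin")            // placeholder multipart-uploaded object
                    .objectAttributes(ObjectAttributes.OBJECT_PARTS, ObjectAttributes.OBJECT_SIZE)
                    .build());
            if (attrs.objectParts() != null) {
                System.out.println("Parts: " + attrs.objectParts().totalPartsCount());
            }
            System.out.println("Total size: " + attrs.objectSize());
        }
    }
}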
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not Found
code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.
General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateSession
permission in the Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
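For illustration, a minimal AWS SDK for Java 2.x sketch that uses HeadBucket as an existence and access check; the bucket name and Region are placeholder assumptions, and the 404 case is surfaced by the SDK as NoSuchBucketException.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
import software.amazon.awssdk.services.s3.model.S3Exception;

public class HeadBucketExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            try {
                s3.headBucket(HeadBucketRequest.builder().bucket("amzn-s3-demo-bucket").build());
                System.out.println("Bucket exists and you have permission to access it.");
            } catch (NoSuchBucketException e) {
                System.out.println("Bucket does not exist (404).");
            } catch (S3Exception e) {
                // 403 Forbidden and other generic codes arrive without a message body.
                System.out.println("HEAD failed with HTTP " + e.statusCode());
            }
        }
    }
}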
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not Found
code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.
General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateSession
permission in the Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
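Another option, sketched below with placeholder names, is the SDK's built-in waiter, which polls HeadBucket until the bucket becomes reachable.

import software.amazon.awssdk.core.waiters.WaiterResponse;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
import software.amazon.awssdk.services.s3.waiters.S3Waiter;

public class WaitForBucket {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            S3Waiter waiter = s3.waiter();
            // Repeats HeadBucket until the bucket exists or the waiter gives up.
            WaiterResponse<HeadBucketResponse> wait = waiter.waitUntilBucketExists(
                    HeadBucketRequest.builder().bucket("amzn-s3-demo-bucket").build());
            wait.matched().response().ifPresent(r -> System.out.println("Bucket is reachable."));
        }
    }
}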
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to determine the exact exception behind these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If you enable x-amz-checksum-mode
in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete markers are not supported for directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The following actions are related to HeadObject
:
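A minimal AWS SDK for Java 2.x sketch of a HeadObject call that reads common response metadata; the bucket name, key, and Region are placeholder assumptions, and recent SDK versions surface the 404 case as NoSuchKeyException.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
import software.amazon.awssdk.services.s3.model.NoSuchKeyException;

public class HeadObjectExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            try {
                HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                        .bucket("amzn-s3-demo-bucket")              // placeholder bucket name
                        .key("photos/2006/February/sample.jpg")
                        .build());
                System.out.println("Content-Type:   " + head.contentType());
                System.out.println("Content-Length: " + head.contentLength());
                System.out.println("Last-Modified:  " + head.lastModified());
            } catch (NoSuchKeyException e) {
                // Returned as 404 when you have s3:ListBucket; otherwise expect a 403.
                System.out.println("Object not found.");
            }
        }
    }
}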
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to determine the exact exception behind these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If you enable x-amz-checksum-mode
in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete markers are not supported for directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
The following actions are related to HeadObject
:
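As a sketch of the x-amz-checksum-mode behavior mentioned above, the following enables checksum mode on a HeadObject request; the bucket name and key are placeholders, and only the checksum algorithm recorded at upload time is populated in the response.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ChecksumMode;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

public class HeadObjectChecksum {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                    .bucket("amzn-s3-demo-bucket")          // placeholder bucket name
                    .key("photos/2006/February/sample.jpg")
                    .checksumMode(ChecksumMode.ENABLED)     // corresponds to x-amz-checksum-mode
                    .build());
            // Checksum fields for algorithms not recorded at upload time are null.
            System.out.println("CRC32:  " + head.checksumCRC32());
            System.out.println("SHA256: " + head.checksumSHA256());
        }
    }
}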
Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the request. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
You must have the s3express:ListAllMyDirectoryBuckets
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The BucketRegion
response element is not part of the ListDirectoryBuckets
Response Syntax.
Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the request. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
You must have the s3express:ListAllMyDirectoryBuckets
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The BucketRegion
response element is not part of the ListDirectoryBuckets
Response Syntax.
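For illustration, a sketch that pages through ListDirectoryBuckets with the AWS SDK for Java 2.x, assuming an SDK release that includes this operation; the Region is a placeholder.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.Bucket;
import software.amazon.awssdk.services.s3.model.ListDirectoryBucketsRequest;
import software.amazon.awssdk.services.s3.model.ListDirectoryBucketsResponse;

public class ListDirectoryBucketsExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            String continuationToken = null;
            do {
                ListDirectoryBucketsResponse page = s3.listDirectoryBuckets(
                        ListDirectoryBucketsRequest.builder()
                                .continuationToken(continuationToken)   // null on the first call
                                .build());
                for (Bucket bucket : page.buckets()) {
                    System.out.println(bucket.name());
                }
                continuationToken = page.continuationToken();
            } while (continuationToken != null);
        }
    }
}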
This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
request, but has not yet been completed or aborted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. In these requests, include two query parameters: key-marker
and upload-id-marker
. Set the value of key-marker
to the NextKeyMarker
value from the previous response. Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
Directory buckets - The upload-id-marker
element and the NextUploadIdMarker
element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.
Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
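A sketch of the pagination pattern described above using the AWS SDK for Java 2.x, following NextKeyMarker and NextUploadIdMarker until the listing is no longer truncated; the bucket name and Region are placeholder assumptions.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;
import software.amazon.awssdk.services.s3.model.MultipartUpload;

public class ListMultipartUploadsExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            String keyMarker = null;
            String uploadIdMarker = null;
            ListMultipartUploadsResponse page;
            do {
                page = s3.listMultipartUploads(ListMultipartUploadsRequest.builder()
                        .bucket("amzn-s3-demo-bucket")      // placeholder general purpose bucket
                        .keyMarker(keyMarker)               // key-marker query parameter
                        .uploadIdMarker(uploadIdMarker)     // upload-id-marker query parameter
                        .build());
                for (MultipartUpload upload : page.uploads()) {
                    System.out.println(upload.key() + " " + upload.uploadId() + " " + upload.initiated());
                }
                keyMarker = page.nextKeyMarker();
                uploadIdMarker = page.nextUploadIdMarker();
            } while (Boolean.TRUE.equals(page.isTruncated()));
        }
    }
}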
This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
request, but has not yet been completed or aborted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. In these requests, include two query parameters: key-marker
and upload-id-marker
. Set the value of key-marker
to the NextKeyMarker
value from the previous response. Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
Directory buckets - The upload-id-marker
element and the NextUploadIdMarker
element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.
Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
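As a sketch of the directory-bucket cleanup described above, the following lists in-progress uploads and aborts each one; the directory bucket name and Region are placeholders, and it assumes the SDK's generated ListMultipartUploads paginator and its uploads() convenience view.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.MultipartUpload;

public class AbortInProgressUploads {
    public static void main(String[] args) {
        String bucket = "amzn-s3-demo-bucket--usw2-az1--x-s3";   // placeholder directory bucket name
        try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
            // The paginator issues follow-up ListMultipartUploads calls as needed.
            s3.listMultipartUploadsPaginator(b -> b.bucket(bucket))
              .uploads()
              .forEach((MultipartUpload upload) -> s3.abortMultipartUpload(
                      AbortMultipartUploadRequest.builder()
                              .bucket(bucket)
                              .key(upload.key())
                              .uploadId(upload.uploadId())
                              .build()));
        }
    }
}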
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
General purpose bucket - For general purpose buckets, ListObjectsV2
doesn't return prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, the ListObjectsV2
response includes the prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.
The following operations are related to ListObjectsV2
:
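For illustration, a minimal AWS SDK for Java 2.x sketch that uses the ListObjectsV2 paginator, which issues follow-up requests automatically; the bucket name, prefix, and Region are placeholder assumptions.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.S3Object;
import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;

public class ListObjectsV2Example {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            // Each page holds up to 1,000 keys; the paginator requests the next page on demand.
            ListObjectsV2Iterable pages = s3.listObjectsV2Paginator(b -> b
                    .bucket("amzn-s3-demo-bucket")     // placeholder bucket name
                    .prefix("photos/2006/"));
            for (S3Object object : pages.contents()) {
                System.out.println(object.key() + " (" + object.size() + " bytes)");
            }
        }
    }
}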
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
General purpose bucket - For general purpose buckets, ListObjectsV2
doesn't return prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, the ListObjectsV2
response includes the prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.
The following operations are related to ListObjectsV2
:
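The same listing can be paged manually with the continuation token, as sketched below with placeholder bucket and Region values.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.S3Object;

public class ListObjectsV2Manual {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            String token = null;
            ListObjectsV2Response page;
            do {
                page = s3.listObjectsV2(ListObjectsV2Request.builder()
                        .bucket("amzn-s3-demo-bucket")   // placeholder bucket name
                        .maxKeys(100)                    // ask for smaller pages than the 1,000 default
                        .continuationToken(token)
                        .build());
                for (S3Object object : page.contents()) {
                    System.out.println(object.key());
                }
                token = page.nextContinuationToken();
            } while (Boolean.TRUE.equals(page.isTruncated()));
        }
    }
}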
Lists the parts that have been uploaded for a specific multipart upload.
To use this operation, you must provide the upload ID
in the request. You obtain this upload ID by sending the initiate multipart upload request through CreateMultipartUpload.
The ListParts
request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true
, and a NextPartNumberMarker
element. To list remaining uploaded parts, in subsequent ListParts
requests, include the part-number-marker
query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
action for the ListParts
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to ListParts
:
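A sketch of the part-number-marker pagination described above using the AWS SDK for Java 2.x; the bucket name, key, upload ID, and Region are placeholder assumptions.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListPartsRequest;
import software.amazon.awssdk.services.s3.model.ListPartsResponse;
import software.amazon.awssdk.services.s3.model.Part;

public class ListPartsExample {
    public static void main(String[] args) {
        String uploadId = "EXAMPLE-UPLOAD-ID";    // placeholder; returned by CreateMultipartUpload
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            Integer partNumberMarker = null;
            ListPartsResponse page;
            do {
                page = s3.listParts(ListPartsRequest.builder()
                        .bucket("amzn-s3-demo-bucket")        // placeholder bucket name
                        .key("large-upload.bin")              // placeholder key
                        .uploadId(uploadId)
                        .partNumberMarker(partNumberMarker)   // part-number-marker query parameter
                        .build());
                for (Part part : page.parts()) {
                    System.out.println("Part " + part.partNumber() + " size " + part.size());
                }
                partNumberMarker = page.nextPartNumberMarker();
            } while (Boolean.TRUE.equals(page.isTruncated()));
        }
    }
}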
Lists the parts that have been uploaded for a specific multipart upload.
To use this operation, you must provide the upload ID
in the request. You obtain this upload ID by sending the initiate multipart upload request through CreateMultipartUpload.
The ListParts
request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true
, and a NextPartNumberMarker
element. To list remaining uploaded parts, in subsequent ListParts
requests, include the part-number-marker
query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
action for the ListParts
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to ListParts
:
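Alternatively, assuming the SDK version in use ships the generated ListParts paginator, the marker handling can be left to the SDK, as in this sketch (same placeholder names as above):

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListPartsRequest;

public class ListPartsWithPaginator {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // The paginator re-issues ListParts with part-number-marker set to the previous
        // NextPartNumberMarker until IsTruncated is false.
        s3.listPartsPaginator(ListPartsRequest.builder()
                    .bucket("amzn-s3-demo-bucket")   // placeholder
                    .key("large-object.bin")         // placeholder
                    .uploadId("EXAMPLE-UPLOAD-ID")
                    .build())
          .stream()
          .flatMap(page -> page.parts().stream())
          .forEach(part -> System.out.println(part.partNumber() + " -> " + part.eTag()));
    }
}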
This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets
You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.
Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported.
S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
When you specify a KMS customer managed key for encryption in your directory bucket, use only the key ID or key ARN. The key alias format of the KMS key isn't supported.
For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketEncryption
:
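As a hedged illustration of the general purpose bucket case, here is a minimal sketch with the AWS SDK for Java 2.x that sets SSE-KMS as the default encryption and enables S3 Bucket Keys; the bucket name and KMS key ARN are placeholders, and, as noted above, Amazon S3 does not validate the key ID for general purpose buckets, so verify it yourself:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionByDefault;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionConfiguration;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

public class SetDefaultSseKms {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        ServerSideEncryptionRule rule = ServerSideEncryptionRule.builder()
                .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                        .sseAlgorithm(ServerSideEncryption.AWS_KMS)   // SSE-KMS
                        .kmsMasterKeyID("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID") // placeholder
                        .build())
                .bucketKeyEnabled(true)   // S3 Bucket Keys reduce request traffic to KMS
                .build();

        s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                .bucket("amzn-s3-demo-bucket")   // placeholder
                .serverSideEncryptionConfiguration(
                        ServerSideEncryptionConfiguration.builder().rules(rule).build())
                .build());
    }
}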
This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets
You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.
Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported.
S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
When you specify a KMS customer managed key for encryption in your directory bucket, use only the key ID or key ARN. The key alias format of the KMS key isn't supported.
For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to PutBucketEncryption
:
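For a directory bucket the call shape is the same; in the sketch below the bucket name is an illustrative directory-bucket-style name and the key ARN is a placeholder, and the key is referenced by ARN because key aliases aren't supported there:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

public class SetDirectoryBucketSseKms {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // Directory buckets: one customer managed key per bucket, referenced by key ID or ARN only.
        s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                .bucket("my-data--usw2-az1--x-s3")   // illustrative directory bucket name
                .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
                        .rules(ServerSideEncryptionRule.builder()
                                .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                                        .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                                        .kmsMasterKeyID("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID")
                                        .build())
                                .build())
                        .build())
                .build());
    }
}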
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.
Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility for general purpose buckets. For the related API description, see PutBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling incomplete multipart uploads. Expiration of versioned objects, transitions, and tag filters are not supported.
A lifecycle rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketLifecycleConfiguration
:
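A minimal sketch with the AWS SDK for Java 2.x that replaces a bucket's lifecycle configuration with a single rule (a key-name prefix filter plus a 30-day expiration); the bucket name and values are placeholders:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

public class PutLifecycleRule {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        LifecycleRule rule = LifecycleRule.builder()
                .id("expire-old-logs")
                .status(ExpirationStatus.ENABLED)
                .filter(LifecycleRuleFilter.builder().prefix("logs/").build())   // key-name prefix filter
                .expiration(LifecycleExpiration.builder().days(30).build())      // expire after 30 days
                .build();

        // This call replaces any existing configuration, so include every rule you want to keep.
        s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                .bucket("amzn-s3-demo-bucket")   // placeholder
                .lifecycleConfiguration(BucketLifecycleConfiguration.builder().rules(rule).build())
                .build());
    }
}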
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.
Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility for general purpose buckets. For the related API description, see PutBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling incomplete multipart uploads. Expiration of versioned objects, transitions, and tag filters are not supported.
A lifecycle rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketLifecycleConfiguration
:
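For directory buckets, where only expiration and cancelling incomplete multipart uploads are supported, here is a hedged sketch of an equivalent rule (all names and values are placeholders):

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

public class PutDirectoryBucketLifecycleRule {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        LifecycleRule rule = LifecycleRule.builder()
                .id("expire-and-clean-up")
                .status(ExpirationStatus.ENABLED)
                .filter(LifecycleRuleFilter.builder().prefix("tmp/").build())
                .expiration(LifecycleExpiration.builder().days(7).build())
                // Stop storing (and paying for) parts of uploads that are never completed.
                .abortIncompleteMultipartUpload(AbortIncompleteMultipartUpload.builder()
                        .daysAfterInitiation(3)
                        .build())
                .build();

        s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                .bucket("my-data--usw2-az1--x-s3")   // illustrative directory bucket name
                .lifecycleConfiguration(BucketLifecycleConfiguration.builder().rules(rule).build())
                .build());
    }
}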
Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the PutBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:PutBucketPolicy
permission is required in a policy. For more information about bucket policies for general purpose buckets, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketPolicy
:
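A minimal sketch with the AWS SDK for Java 2.x that applies a simple bucket policy; the account ID, role name, bucket name, and the policy itself are placeholders, and the caller must satisfy the ownership rules described above:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest;

public class ApplyBucketPolicy {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // Example policy: allow one role to read objects in the bucket (all ARNs are placeholders).
        String policy = "{"
                + "\"Version\":\"2012-10-17\","
                + "\"Statement\":[{"
                + "\"Sid\":\"AllowGetObject\","
                + "\"Effect\":\"Allow\","
                + "\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:role/ExampleReader\"},"
                + "\"Action\":\"s3:GetObject\","
                + "\"Resource\":\"arn:aws:s3:::amzn-s3-demo-bucket/*\""
                + "}]}";

        s3.putBucketPolicy(PutBucketPolicyRequest.builder()
                .bucket("amzn-s3-demo-bucket")   // placeholder
                .policy(policy)
                .build());
    }
}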
Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the PutBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:PutBucketPolicy
permission is required in a policy. For more information about bucket policies for general purpose buckets, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to PutBucketPolicy
:
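The 403 versus 405 distinction described above surfaces in the AWS SDK for Java 2.x as an S3Exception whose status code can be inspected; a sketch, with the bucket name and policy document as placeholders:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

public class PutBucketPolicyErrorHandling {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();
        String policyJson = "{ ... }";   // placeholder policy document (see the earlier sketch)

        try {
            s3.putBucketPolicy(PutBucketPolicyRequest.builder()
                    .bucket("amzn-s3-demo-bucket")   // placeholder
                    .policy(policyJson)
                    .build());
        } catch (S3Exception e) {
            if (e.statusCode() == 403) {
                // The caller lacks PutBucketPolicy permission on the bucket.
                System.err.println("Access denied: " + e.awsErrorDetails().errorMessage());
            } else if (e.statusCode() == 405) {
                // The caller has the permission but isn't in the bucket owner's account.
                System.err.println("Method not allowed: " + e.awsErrorDetails().errorMessage());
            } else {
                throw e;
            }
        }
    }
}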
Adds an object to a bucket.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject
to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.
This functionality is not supported for directory buckets.
S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
This functionality is not supported for directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your PutObject
request includes specific headers.
s3:PutObject
- To successfully complete the PutObject
request, you must always have the s3:PutObject
permission on a bucket to add an object to it.
s3:PutObjectAcl
- To successfully change the object's ACL with your PutObject
request, you must have the s3:PutObjectAcl
.
s3:PutObjectTagging
- To successfully set the tag-set with your PutObject
request, you must have the s3:PutObjectTagging
.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
Directory bucket - This functionality is not supported for directory buckets.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
For more information about related Amazon S3 APIs, see the following:
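To illustrate the Content-MD5 integrity check described above for general purpose buckets, here is a minimal sketch with the AWS SDK for Java 2.x, using the SDK's Md5Utils helper to compute the base64 digest (the bucket and key are placeholders):

import java.nio.charset.StandardCharsets;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.utils.Md5Utils;

public class PutObjectWithMd5 {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();
        byte[] data = "hello, world".getBytes(StandardCharsets.UTF_8);

        // Send the base64-encoded MD5 of the payload; S3 rejects the request if the
        // bytes it receives hash to a different value.
        s3.putObject(PutObjectRequest.builder()
                        .bucket("amzn-s3-demo-bucket")   // placeholder
                        .key("greeting.txt")             // placeholder
                        .contentMD5(Md5Utils.md5AsBase64(data))
                        .build(),
                RequestBody.fromBytes(data));
    }
}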
", + "documentation":"Adds an object to a bucket.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject
to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.
This functionality is not supported for directory buckets.
S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
This functionality is not supported for directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your PutObject
request includes specific headers.
s3:PutObject
- To successfully complete the PutObject
request, you must always have the s3:PutObject
permission on a bucket to add an object to it.
s3:PutObjectAcl
- To successfully change the object's ACL with your PutObject
request, you must have the s3:PutObjectAcl
.
s3:PutObjectTagging
- To successfully set the tag-set with your PutObject
request, you must have the s3:PutObjectTagging
.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
Directory bucket - This functionality is not supported for directory buckets.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
For more information about related Amazon S3 APIs, see the following:
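Because Content-MD5 isn't supported for directory buckets, here is a hedged sketch that asks the SDK to send an additional checksum instead; the directory-bucket-style name and key are placeholders:

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;

public class PutObjectWithChecksum {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // Requesting CRC32 makes the SDK compute and send the checksum header,
        // and S3 verifies it on receipt.
        s3.putObject(PutObjectRequest.builder()
                        .bucket("my-data--usw2-az1--x-s3")   // illustrative directory bucket name
                        .key("greeting.txt")                 // placeholder
                        .checksumAlgorithm(ChecksumAlgorithm.CRC32)
                        .build(),
                RequestBody.fromString("hello, world"));
    }
}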
", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1375,7 +1375,7 @@ "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation":"Uploads a part in a multipart upload.
In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs.
These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
).
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPart
:
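Pulling the multipart lifecycle together (initiate, upload parts, then complete or abort), here is a minimal single-part sketch with the AWS SDK for Java 2.x; a real upload would loop over parts of at least 5 MiB each except the last, and all names and the file path are placeholders:

import java.nio.file.Paths;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

public class MultipartUploadFlow {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();
        String bucket = "amzn-s3-demo-bucket";   // placeholder
        String key = "large-object.bin";         // placeholder

        String uploadId = s3.createMultipartUpload(
                CreateMultipartUploadRequest.builder().bucket(bucket).key(key).build()).uploadId();

        // Upload part 1 (part numbers run from 1 to 10,000).
        UploadPartResponse part1 = s3.uploadPart(
                UploadPartRequest.builder()
                        .bucket(bucket).key(key)
                        .uploadId(uploadId)
                        .partNumber(1)
                        .build(),
                RequestBody.fromFile(Paths.get("/tmp/part-1.bin")));   // placeholder path

        // Complete (or abort) so S3 stops charging for the stored parts.
        s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                .bucket(bucket).key(key)
                .uploadId(uploadId)
                .multipartUpload(CompletedMultipartUpload.builder()
                        .parts(CompletedPart.builder().partNumber(1).eTag(part1.eTag()).build())
                        .build())
                .build());
    }
}

If the upload cannot be completed, calling AbortMultipartUpload with the same upload ID releases the stored parts.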
Uploads a part in a multipart upload.
In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs.
These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
).
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to UploadPart
:
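If the upload was initiated with SSE-C, every UploadPart call must repeat the same customer-key headers, as noted above; a hedged sketch in which the key material, names, and upload ID are placeholders:

import java.security.SecureRandom;
import java.util.Base64;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;
import software.amazon.awssdk.utils.Md5Utils;

public class UploadPartWithSseC {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // Demo key only; in practice this must be the exact key supplied when the
        // multipart upload was created with SSE-C.
        byte[] keyBytes = new byte[32];
        new SecureRandom().nextBytes(keyBytes);

        s3.uploadPart(UploadPartRequest.builder()
                        .bucket("amzn-s3-demo-bucket")   // placeholder
                        .key("large-object.bin")         // placeholder
                        .uploadId("EXAMPLE-UPLOAD-ID")   // placeholder
                        .partNumber(1)
                        .sseCustomerAlgorithm("AES256")
                        .sseCustomerKey(Base64.getEncoder().encodeToString(keyBytes))
                        .sseCustomerKeyMD5(Md5Utils.md5AsBase64(keyBytes))
                        .build(),
                RequestBody.fromString("part data"));    // placeholder payload
    }
}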
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPartCopy
:
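A minimal sketch with the AWS SDK for Java 2.x that copies a byte range from an existing object into part 1 of a multipart upload; all names, the upload ID, and the range are placeholders, and the sourceBucket/sourceKey setters assume a reasonably recent SDK release:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse;

public class UploadPartCopyRange {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // The SDK builds the x-amz-copy-source and x-amz-copy-source-range headers from these fields.
        UploadPartCopyResponse response = s3.uploadPartCopy(UploadPartCopyRequest.builder()
                .sourceBucket("amzn-s3-demo-source")        // placeholder
                .sourceKey("existing-object.bin")           // placeholder
                .destinationBucket("amzn-s3-demo-bucket")   // placeholder
                .destinationKey("large-object.bin")         // placeholder
                .uploadId("EXAMPLE-UPLOAD-ID")              // from CreateMultipartUpload
                .partNumber(1)
                .copySourceRange("bytes=0-5242879")         // first 5 MiB of the source object
                .build());

        // Keep the returned ETag; CompleteMultipartUpload needs it for this part number.
        System.out.println(response.copyPartResult().eTag());
    }
}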
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
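As a concrete illustration of the KMS permissions above, the sketch below initiates a multipart upload with SSE-KMS through the AWS SDK for Java 2.x; the bucket, key, and KMS key ARN are placeholders. Initiating the upload exercises kms:GenerateDataKey, while the subsequent UploadPart / UploadPartCopy calls exercise kms:Decrypt.

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

public class MultipartWithSseKmsSketch {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // Initiating the upload with SSE-KMS (aws:kms) requires
        // kms:GenerateDataKey on the key; the part uploads that follow
        // require kms:Decrypt on the same key.
        String uploadId = s3.createMultipartUpload(b -> b
                .bucket("amzn-s3-demo-bucket")
                .key("encrypted-object")
                .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                .ssekmsKeyId("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"))
            .uploadId();

        System.out.println("Upload ID: " + uploadId);
    }
}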
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to UploadPartCopy
:
The bucket name to which the upload was taking place.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name to which the upload was taking place.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
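The access point and Outposts rules above apply to any operation that accepts a bucket name. As a hedged sketch, the AWS SDK for Java 2.x accepts an access point ARN in place of the bucket name and routes the request to the access point hostname; the account ID, access point name, and key below are placeholders, and GetObject is used only for illustration.

import software.amazon.awssdk.core.ResponseBytes;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;

public class AccessPointArnSketch {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // The access point ARN goes wherever a bucket name is expected.
        ResponseBytes<GetObjectResponse> object = s3.getObjectAsBytes(b -> b
            .bucket("arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point")
            .key("example-key"));

        System.out.println("Fetched " + object.asByteArray().length + " bytes");
    }
}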
The number of Availability Zones used for redundancy for the bucket.
The number of Zones (Availability Zone or Local Zone) used for redundancy for the bucket.
Name of the bucket to which the multipart upload was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
Name of the bucket to which the multipart upload was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
If the object expiration is configured, the response includes this header.
This functionality is not supported for directory buckets.
If the object expiration is configured, the response includes this header.
Object expiration information is not returned in directory buckets and this header returns the value NotImplemented
in all responses for directory buckets.
The name of the destination bucket.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the destination bucket.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, you get an HTTP 400 Bad Request
error with the error code InvalidRequest
.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
Specifies the location where the bucket will be created.
For directory buckets, the location type is Availability Zone.
This functionality is only supported by directory buckets.
Specifies the location where the bucket will be created.
Directory buckets - The location type is Availability Zone or Local Zone. When the location type is Local Zone, your Local Zone must be in opt-in status. Otherwise, you get an HTTP 400 Bad Request
error with the error code Access denied
. To learn more about opt-in Local Zones, see Opt-in Dedicated Local Zones in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
The name of the bucket to create.
General purpose buckets - For information about bucket naming restrictions, see Bucket naming rules in the Amazon S3 User Guide.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The name of the bucket to create.
General purpose buckets - For information about bucket naming restrictions, see Bucket naming rules in the Amazon S3 User Guide.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
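To tie the naming and location rules above together, here is a minimal sketch of creating a directory bucket with the AWS SDK for Java 2.x. The zone ID usw2-az1 and the bucket name are placeholders; the enum values reflect the model at the time of this change, and for a Local Zone the corresponding location type and zone ID would be supplied instead.

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.BucketType;
import software.amazon.awssdk.services.s3.model.DataRedundancy;
import software.amazon.awssdk.services.s3.model.LocationType;

public class CreateDirectoryBucketSketch {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // Directory bucket names follow bucket-base-name--zone-id--x-s3.
        String bucketName = "doc-example-bucket--usw2-az1--x-s3";

        s3.createBucket(b -> b
            .bucket(bucketName)
            .createBucketConfiguration(c -> c
                .location(l -> l
                    .type(LocationType.AVAILABILITY_ZONE) // zone placement
                    .name("usw2-az1"))                     // zone ID
                .bucket(info -> info
                    .type(BucketType.DIRECTORY)
                    .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE))));
    }
}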
The name of the bucket where the multipart upload is initiated and where the object is uploaded.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket where the multipart upload is initiated and where the object is uploaded.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket containing the server-side encryption configuration to delete.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The name of the bucket containing the server-side encryption configuration to delete.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The bucket name.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The bucket name.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
Specifies the bucket being deleted.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
Specifies the bucket being deleted.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The bucket name of the bucket containing the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name of the bucket containing the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the objects to delete.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the objects to delete.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket from which the server-side encryption configuration is retrieved.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The name of the bucket from which the server-side encryption configuration is retrieved.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
The bucket name to get the bucket policy for.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
Access points - When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
The bucket name to get the bucket policy for.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
Access points - When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
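As a small sketch of the parameter described above, the AWS SDK for Java 2.x call below retrieves a bucket policy; the bucket name is a placeholder, and for an access point or Object Lambda access point its alias would be passed in its place.

import software.amazon.awssdk.services.s3.S3Client;

public class GetBucketPolicySketch {
    public static void main(String[] args) {
        S3Client s3 = S3Client.create();

        // For a directory bucket, pass its full name
        // (bucket-base-name--zone-id--x-s3); requests for directory buckets
        // go to the s3express-control endpoint, as noted above.
        String policyJson = s3.getBucketPolicy(b -> b.bucket("amzn-s3-demo-bucket"))
                              .policy();

        System.out.println(policyJson);
    }
}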
The name of the bucket that contains the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket that contains the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
If the object expiration is configured (see PutBucketLifecycleConfiguration
), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs providing object expiration information. The value of the rule-id
is URL-encoded.
This functionality is not supported for directory buckets.
If the object expiration is configured (see PutBucketLifecycleConfiguration
), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs providing object expiration information. The value of the rule-id
is URL-encoded.
Object expiration information is not returned in directory buckets and this header returns the value NotImplemented
in all responses for directory buckets.
The bucket name containing the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the location where the bucket will be created.
For directory buckets, the AZ ID of the Availability Zone where the bucket is created. An example AZ ID value is usw2-az1
.
This functionality is only supported by directory buckets.
The name of the location where the bucket will be created.
For directory buckets, the Zone ID of the Availability Zone or the Local Zone where the bucket is created. An example Zone ID value for an Availability Zone is usw2-az1
.
This functionality is only supported by directory buckets.
The bucket name.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
If the object expiration is configured (see PutBucketLifecycleConfiguration
), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs providing object expiration information. The value of the rule-id
is URL-encoded.
This functionality is not supported for directory buckets.
If the object expiration is configured (see PutBucketLifecycleConfiguration
), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs providing object expiration information. The value of the rule-id
is URL-encoded.
Object expiration information is not returned in directory buckets and this header returns the value NotImplemented
in all responses for directory buckets.
The name of the bucket that contains the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket that contains the object.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3
(for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
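To make the naming and addressing rules above concrete, here is a minimal, illustrative sketch with the AWS SDK for Java v2; the bucket name, key, and Region are placeholders. It starts a multipart upload against a directory bucket whose name follows the bucket-base-name--zone-id--x-s3 convention, and the client is expected to resolve the virtual-hosted-style s3express endpoint itself.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;

public class DirectoryBucketMultipartUpload {
    public static void main(String[] args) {
        // Hypothetical directory bucket in Availability Zone usw2-az1.
        String bucket = "doc-example-bucket--usw2-az1--x-s3";

        try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
            // The SDK sends this as a virtual-hosted-style request to the
            // bucket's s3express endpoint; no manual endpoint wiring is needed here.
            CreateMultipartUploadResponse response = s3.createMultipartUpload(
                    CreateMultipartUploadRequest.builder()
                            .bucket(bucket)
                            .key("large-object.bin")
                            .build());
            System.out.println("Upload ID: " + response.uploadId());
        }
    }
}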
The name of the bucket containing the objects.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket containing the objects.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
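A hedged sketch of the "alias or ARN in place of the bucket name" rule for access points, assuming a hypothetical access point ARN and object key; the same DeleteObjects call works unchanged against a plain bucket name.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.Delete;
import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

public class DeleteThroughAccessPoint {
    public static void main(String[] args) {
        // Hypothetical access point ARN supplied in place of the bucket name.
        String accessPointArn =
                "arn:aws:s3:us-east-1:111122223333:accesspoint/my-access-point";

        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            s3.deleteObjects(DeleteObjectsRequest.builder()
                    .bucket(accessPointArn)
                    .delete(Delete.builder()
                            .objects(ObjectIdentifier.builder().key("obsolete.txt").build())
                            .build())
                    .build());
        }
    }
}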
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the parts are being uploaded.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the parts are being uploaded.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
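For the part-upload case described above, a brief sketch of sending one part with the SDK for Java v2; the upload ID, bucket name, key, and part size are assumptions carried over from a preceding CreateMultipartUpload call.

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;
import software.amazon.awssdk.services.s3.model.UploadPartResponse;

public class UploadPartExample {
    public static void main(String[] args) {
        // Hypothetical values; the upload ID comes from CreateMultipartUpload.
        String bucket = "doc-example-bucket--usw2-az1--x-s3";
        String uploadId = "EXAMPLE-UPLOAD-ID";
        byte[] partData = new byte[5 * 1024 * 1024]; // first part, 5 MiB

        try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
            UploadPartResponse part = s3.uploadPart(
                    UploadPartRequest.builder()
                            .bucket(bucket)
                            .key("large-object.bin")
                            .uploadId(uploadId)
                            .partNumber(1)
                            .build(),
                    RequestBody.fromBytes(partData));
            System.out.println("ETag for part 1: " + part.eTag());
        }
    }
}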
The name of the location where the bucket will be created.
For directory buckets, the name of the location is the AZ ID of the Availability Zone where the bucket will be created. An example AZ ID value is usw2-az1.
The name of the location where the bucket will be created.
For directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is usw2-az1.
Specifies the location where the bucket will be created.
For directory buckets, the location type is Availability Zone. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
Specifies the location where the bucket will be created.
For directory buckets, the location type is Availability Zone or Local Zone. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
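A minimal sketch of how the location name (Zone ID) and location type described above are supplied when creating a directory bucket. The zone ID, Region, and bucket name are placeholders, and the LocationInfo/BucketInfo builders are assumed to be the shapes the S3 model exposes for directory buckets.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.BucketInfo;
import software.amazon.awssdk.services.s3.model.BucketType;
import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.DataRedundancy;
import software.amazon.awssdk.services.s3.model.LocationInfo;
import software.amazon.awssdk.services.s3.model.LocationType;

public class CreateDirectoryBucket {
    public static void main(String[] args) {
        String zoneId = "usw2-az1";                                 // example AZ ID
        String bucket = "doc-example-bucket--" + zoneId + "--x-s3"; // required suffix

        try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
            s3.createBucket(CreateBucketRequest.builder()
                    .bucket(bucket)
                    .createBucketConfiguration(CreateBucketConfiguration.builder()
                            // Location name is the zone ID; location type is Availability Zone.
                            .location(LocationInfo.builder()
                                    .type(LocationType.AVAILABILITY_ZONE)
                                    .name(zoneId)
                                    .build())
                            .bucket(BucketInfo.builder()
                                    .type(BucketType.DIRECTORY)
                                    .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE)
                                    .build())
                            .build())
                    .build());
        }
    }
}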
Specifies default encryption for a bucket using server-side encryption with different key options.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Specifies default encryption for a bucket using server-side encryption with different key options.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
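A sketch of setting default encryption under these rules, assuming a placeholder directory bucket name and KMS key ARN; the SDK is expected to resolve the path-style s3express-control endpoint itself, so only the bucket name is supplied.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionByDefault;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionConfiguration;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

public class SetDefaultBucketEncryption {
    public static void main(String[] args) {
        String bucket = "doc-example-bucket--usw2-az1--x-s3";   // placeholder
        String kmsKeyId = "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"; // placeholder

        try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
            s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                    .bucket(bucket)
                    .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
                            .rules(ServerSideEncryptionRule.builder()
                                    .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                                            .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                                            .kmsMasterKeyID(kmsKeyId)
                                            .build())
                                    .bucketKeyEnabled(true)
                                    .build())
                            .build())
                    .build());
        }
    }
}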
The name of the bucket.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
The name of the bucket.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
If the expiration is configured for the object (see PutBucketLifecycleConfiguration in the Amazon S3 User Guide), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
This functionality is not supported for directory buckets.
If the expiration is configured for the object (see PutBucketLifecycleConfiguration in the Amazon S3 User Guide), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
Object expiration information is not returned in directory buckets and this header returns the value "NotImplemented" in all responses for directory buckets.
The bucket name to which the PUT action was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name to which the PUT action was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
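A small illustration of reading the expiration header discussed above from a PutObject response; the bucket and key are placeholders for a general purpose bucket, and on directory buckets the same accessor would surface the "NotImplemented" value instead.

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;

public class CheckObjectExpiration {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            PutObjectResponse response = s3.putObject(
                    PutObjectRequest.builder()
                            .bucket("doc-example-bucket")   // placeholder general purpose bucket
                            .key("report.csv")
                            .build(),
                    RequestBody.fromString("id,value\n1,42\n"));

            // Maps to the x-amz-expiration header; null when no lifecycle rule
            // applies, and "NotImplemented" for directory buckets.
            System.out.println("Expiration: " + response.expiration());
        }
    }
}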
Type of restore request.
Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
Type of restore request.
Describes the parameters for Select job types.
Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
Describes the parameters for Select job types.
Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records. It returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, see S3Select API Documentation.
Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records. It returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, see S3Select API Documentation.
The expression that is used to query the object.
Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
The expression that is used to query the object.
Describes how the results of the Select job are serialized.
Describes the parameters for Select job types.
Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
Describes the parameters for Select job types.
Learn how to optimize querying your data in Amazon S3 using Amazon Athena, S3 Object Lambda, or client-side filtering.
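Because the updated documentation points existing S3 Select users toward Athena, S3 Object Lambda, or client-side filtering, here is a rough sketch of the client-side option only; the bucket, key, and CSV layout are assumptions, and the filter mimics a simple SELECT ... WHERE predicate in application code rather than anything the service provides.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import software.amazon.awssdk.core.ResponseInputStream;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;

public class ClientSideCsvFilter {
    public static void main(String[] args) throws Exception {
        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build();
             ResponseInputStream<GetObjectResponse> object = s3.getObject(
                     GetObjectRequest.builder()
                             .bucket("doc-example-bucket")   // placeholders
                             .key("records.csv")
                             .build());
             BufferedReader reader = new BufferedReader(
                     new InputStreamReader(object, StandardCharsets.UTF_8))) {

            // Roughly equivalent in spirit to: SELECT * FROM s3object s WHERE s._2 > 100
            reader.lines()
                  .skip(1)                          // skip the header row
                  .map(line -> line.split(","))
                  .filter(cols -> cols.length > 1)
                  .filter(cols -> Double.parseDouble(cols[1].trim()) > 100)
                  .forEach(cols -> System.out.println(String.join(",", cols)));
        }
    }
}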
" }, "ServerSideEncryption":{ "type":"string", @@ -11245,7 +11251,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The bucket name.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, you get an HTTP 400 Bad Request error with the error code InvalidRequest.
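A hedged sketch of detecting that error condition when a copy involves a Local Zone bucket; the bucket names are placeholders, and the check simply mirrors the HTTP 400 / InvalidRequest behavior described above.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

public class CopyWithLocalZoneCheck {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build()) {
            try {
                s3.copyObject(CopyObjectRequest.builder()
                        .sourceBucket("doc-example-source-bucket")              // placeholders
                        .sourceKey("data.bin")
                        .destinationBucket("doc-example-bucket--usw2-az1--x-s3")
                        .destinationKey("data.bin")
                        .build());
            } catch (S3Exception e) {
                // Cross-Region copies involving Local Zone buckets are rejected
                // with HTTP 400 and the error code InvalidRequest.
                if (e.statusCode() == 400
                        && "InvalidRequest".equals(e.awsErrorDetails().errorCode())) {
                    System.err.println("Source and destination must share the same parent Region: "
                            + e.awsErrorDetails().errorMessage());
                } else {
                    throw e;
                }
            }
        }
    }
}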
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
Access points and Object Lambda access points are not supported by directory buckets.
S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
Disables the HTTP endpoint for the specified DB cluster. Disabling this endpoint disables RDS Data API.
For more information, see Using RDS Data API in the Amazon Aurora User Guide.
This operation applies only to Aurora PostgreSQL Serverless v2 and provisioned DB clusters. To disable the HTTP endpoint for Aurora Serverless v1 DB clusters, use the EnableHttpEndpoint parameter of the ModifyDBCluster operation.
Disables the HTTP endpoint for the specified DB cluster. Disabling this endpoint disables RDS Data API.
For more information, see Using RDS Data API in the Amazon Aurora User Guide.
This operation applies only to Aurora Serverless v2 and provisioned DB clusters. To disable the HTTP endpoint for Aurora Serverless v1 DB clusters, use the EnableHttpEndpoint parameter of the ModifyDBCluster operation.
Enables the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.
When enabled, this endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the Aurora DB cluster. You can also query your database from inside the RDS console with the RDS query editor.
For more information, see Using RDS Data API in the Amazon Aurora User Guide.
This operation applies only to Aurora PostgreSQL Serverless v2 and provisioned DB clusters. To enable the HTTP endpoint for Aurora Serverless v1 DB clusters, use the EnableHttpEndpoint parameter of the ModifyDBCluster operation.
Enables the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.
When enabled, this endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the Aurora DB cluster. You can also query your database from inside the RDS console with the RDS query editor.
For more information, see Using RDS Data API in the Amazon Aurora User Guide.
This operation applies only to Aurora Serverless v2 and provisioned DB clusters. To enable the HTTP endpoint for Aurora Serverless v1 DB clusters, use the EnableHttpEndpoint parameter of the ModifyDBCluster operation.
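These Enable/DisableHttpEndpoint operations take the cluster ARN directly rather than a ModifyDBCluster parameter. A brief sketch with the SDK for Java v2 follows; the cluster ARN is a placeholder.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.rds.RdsClient;
import software.amazon.awssdk.services.rds.model.EnableHttpEndpointResponse;

public class EnableDataApiEndpoint {
    public static void main(String[] args) {
        // Placeholder cluster ARN.
        String clusterArn = "arn:aws:rds:us-east-1:111122223333:cluster:my-aurora-cluster";

        try (RdsClient rds = RdsClient.builder().region(Region.US_EAST_1).build()) {
            EnableHttpEndpointResponse response =
                    rds.enableHttpEndpoint(r -> r.resourceArn(clusterArn));
            System.out.println("HTTP endpoint enabled: " + response.httpEndpointEnabled());

            // The corresponding disable call:
            // rds.disableHttpEndpoint(r -> r.resourceArn(clusterArn));
        }
    }
}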
The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide.
If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.
Valid for Cluster Type: Multi-AZ DB clusters only
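Putting the Enhanced Monitoring settings above together with the DatabaseInsightsMode member this change introduces (enum values standard and advanced), a sketch of a Multi-AZ DB cluster request follows; all identifiers, the engine choice, and the role ARN are placeholders rather than recommended values.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.rds.RdsClient;
import software.amazon.awssdk.services.rds.model.CreateDbClusterRequest;

public class CreateClusterWithInsights {
    public static void main(String[] args) {
        try (RdsClient rds = RdsClient.builder().region(Region.US_EAST_1).build()) {
            rds.createDBCluster(CreateDbClusterRequest.builder()
                    .dbClusterIdentifier("example-multiaz-cluster")     // placeholders
                    .engine("mysql")
                    .masterUsername("admin")
                    .manageMasterUserPassword(true)
                    .dbClusterInstanceClass("db.m6gd.large")
                    .allocatedStorage(100)
                    .iops(1000)
                    .storageType("io1")
                    .monitoringInterval(30)
                    .monitoringRoleArn("arn:aws:iam::111122223333:role/emaccess")
                    .databaseInsightsMode("advanced")   // new member; enum values: standard | advanced
                    .enablePerformanceInsights(true)
                    .build());
        }
    }
}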
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"Specifies the mode of Database Insights to enable for the cluster.
" + }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", "documentation":"Specifies whether to turn on Performance Insights for the DB cluster.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
Valid for Cluster Type: Multi-AZ DB clusters only
" @@ -4610,6 +4614,10 @@ "shape":"BooleanOptional", "documentation":"Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to the following DB instances:
Amazon Aurora (Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.)
RDS Custom
Specifies the mode of Database Insights to enable for the instance.
" + }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", "documentation":"Specifies whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" @@ -4774,6 +4782,10 @@ "shape":"BooleanOptional", "documentation":"Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"Specifies the mode of Database Insights.
" + }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", "documentation":"Specifies whether to enable Performance Insights for the read replica.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" @@ -5695,6 +5707,10 @@ "shape":"String", "documentation":"The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.
This setting is only for non-Aurora Multi-AZ DB clusters.
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"The mode of Database Insights that is enabled for the cluster.
" + }, "PerformanceInsightsEnabled":{ "shape":"BooleanOptional", "documentation":"Indicates whether Performance Insights is enabled for the DB cluster.
This setting is only for non-Aurora Multi-AZ DB clusters.
" @@ -6939,6 +6955,10 @@ "shape":"Boolean", "documentation":"Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled for the DB instance.
For a list of engine versions that support IAM database authentication, see IAM database authentication in the Amazon RDS User Guide and IAM database authentication in Aurora in the Amazon Aurora User Guide.
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"The mode of Database Insights that is enabled for the instance.
" + }, "PerformanceInsightsEnabled":{ "shape":"BooleanOptional", "documentation":"Indicates whether Performance Insights is enabled for the DB instance.
" @@ -8699,6 +8719,13 @@ "min":1, "pattern":"^arn:[A-Za-z][0-9A-Za-z-:._]*" }, + "DatabaseInsightsMode":{ + "type":"string", + "enum":[ + "standard", + "advanced" + ] + }, "DeleteBlueGreenDeploymentRequest":{ "type":"structure", "required":["BlueGreenDeploymentIdentifier"], @@ -12339,7 +12366,7 @@ }, "EnableHttpEndpoint":{ "shape":"BooleanOptional", - "documentation":"Specifies whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint isn't enabled.
When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the RDS query editor.
For more information, see Using RDS Data API in the Amazon Aurora User Guide.
This parameter applies only to Aurora Serverless v1 DB clusters. To enable or disable the HTTP endpoint for an Aurora PostgreSQL Serverless v2 or provisioned DB cluster, use the EnableHttpEndpoint
and DisableHttpEndpoint
operations.
Valid for Cluster Type: Aurora DB clusters only
" + "documentation":"Specifies whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint isn't enabled.
When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the RDS query editor.
For more information, see Using RDS Data API in the Amazon Aurora User Guide.
This parameter applies only to Aurora Serverless v1 DB clusters. To enable or disable the HTTP endpoint for an Aurora Serverless v2 or provisioned DB cluster, use the EnableHttpEndpoint
and DisableHttpEndpoint
operations.
Valid for Cluster Type: Aurora DB clusters only
" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -12377,6 +12404,10 @@ "shape":"String", "documentation":"The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess
. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.
If MonitoringInterval
is set to a value other than 0
, supply a MonitoringRoleArn
value.
Valid for Cluster Type: Multi-AZ DB clusters only
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"Specifies the mode of Database Insights to enable for the cluster.
" + }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", "documentation":"Specifies whether to turn on Performance Insights for the DB cluster.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
" @@ -12645,6 +12676,10 @@ "shape":"BooleanOptional", "documentation":"Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
This setting doesn't apply to Amazon Aurora. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"Specifies the mode of Database Insights to enable for the instance.
" + }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", "documentation":"Specifies whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" @@ -15617,6 +15652,10 @@ "shape":"String", "documentation":"An Amazon Web Services Identity and Access Management (IAM) role with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon S3 bucket. For information about this role, see Creating an IAM role manually in the Amazon RDS User Guide.
" }, + "DatabaseInsightsMode":{ + "shape":"DatabaseInsightsMode", + "documentation":"Specifies the mode of Database Insights to enable for the instance.
" + }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", "documentation":"Specifies whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
" From 1fb1c81e9321c8f42caddb2f038d6c354d0d33d9 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 2 Dec 2024 03:51:41 +0000 Subject: [PATCH 35/35] Release 2.29.24. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.29.24.json | 192 ++++++++++++++++++ .../bugfix-AWSCRTbasedS3client-906e1ea.json | 6 - .../feature-AWSCleanRoomsService-f293562.json | 6 - .../feature-AWSInvoicing-e5d5943.json | 6 - .../feature-AWSOrganizations-42333d1.json | 6 - .../feature-AWSS3Control-ab425dc.json | 6 - .../feature-AWSSecurityHub-096bdcc.json | 6 - .../feature-AWSTransferFamily-5c2b94e.json | 6 - ...eature-AgentsforAmazonBedrock-35d36fc.json | 6 - ...AgentsforAmazonBedrockRuntime-70183c9.json | 6 - .../feature-AmazonBedrock-4b0d7bd.json | 6 - .../feature-AmazonChimeSDKVoice-59cfe26.json | 6 - .../feature-AmazonCloudWatchLogs-dd286dc.json | 6 - ...mazonConnectCampaignServiceV2-d4badfa.json | 6 - ...AmazonConnectCustomerProfiles-836a182.json | 6 - .../feature-AmazonConnectService-8488c96.json | 6 - ...ure-AmazonEC2ContainerService-889d675.json | 6 - ...ure-AmazonElasticComputeCloud-c98c8a3.json | 6 - ...mazonElasticKubernetesService-d3cdaff.json | 6 - .../feature-AmazonEventBridge-300044d.json | 6 - .../feature-AmazonFSx-bef794e.json | 6 - .../feature-AmazonGuardDuty-4ce4a9d.json | 6 - .../feature-AmazonMemoryDB-d74846d.json | 6 - ...ature-AmazonOpenSearchService-afd5c96.json | 6 - .../feature-AmazonQConnect-4057b7d.json | 6 - ...azonRelationalDatabaseService-e21caa4.json | 6 - ...re-AmazonSimpleStorageService-36d4cfc.json | 6 - .../feature-AmazonVPCLattice-e1068cd.json | 6 - .../feature-EC2ImageBuilder-0b74f6d.json | 6 - .../feature-NetworkFlowMonitor-28a0b5a.json | 6 - .../feature-QBusiness-05a6cee.json | 6 - ...ture-SecurityIncidentResponse-214853b.json | 6 - CHANGELOG.md | 125 ++++++++++++ README.md | 8 +- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 17 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 17 +- bundle-logging-bridge/pom.xml | 2 +- bundle-sdk/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/checksums-spi/pom.xml | 2 +- core/checksums/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/http-auth-aws-crt/pom.xml | 2 +- core/http-auth-aws-eventstream/pom.xml | 2 +- core/http-auth-aws/pom.xml | 2 +- core/http-auth-spi/pom.xml | 2 +- core/http-auth/pom.xml | 2 +- core/identity-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/protocols/smithy-rpcv2-protocol/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/retries-spi/pom.xml | 2 +- core/retries/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- 
http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- .../s3-event-notifications/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/applicationsignals/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/apptest/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/artifact/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/b2bi/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/bcmdataexports/pom.xml | 2 +- services/bcmpricingcalculator/pom.xml | 2 +- services/bedrock/pom.xml | 2 +- services/bedrockagent/pom.xml | 2 +- services/bedrockagentruntime/pom.xml | 2 +- services/bedrockruntime/pom.xml | 2 +- services/billing/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chatbot/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cleanroomsml/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudfrontkeyvaluestore/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codeconnections/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- 
services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcampaignsv2/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controlcatalog/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/costoptimizationhub/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/datazone/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/deadline/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/directoryservicedata/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/eksauth/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/freetier/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/geomaps/pom.xml | 2 +- services/geoplaces/pom.xml | 2 +- services/georoutes/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/inspectorscan/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/invoicing/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml 
| 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/launchwizard/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/mailmanager/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplaceagreement/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplacedeployment/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/marketplacereporting/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/neptunedata/pom.xml | 2 +- services/neptunegraph/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkflowmonitor/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/networkmonitor/pom.xml | 2 +- services/notifications/pom.xml | 2 +- services/notificationscontacts/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/observabilityadmin/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/partnercentralselling/pom.xml | 2 +- 
services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/pcaconnectorad/pom.xml | 2 +- services/pcaconnectorscep/pom.xml | 2 +- services/pcs/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 5 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qapps/pom.xml | 2 +- services/qbusiness/pom.xml | 2 +- services/qconnect/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/repostspace/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53profiles/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securityir/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/socialmessaging/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmquicksetup/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/supplychain/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/taxsettings/pom.xml | 2 +- 
services/textract/pom.xml | 2 +-
services/timestreaminfluxdb/pom.xml | 2 +-
services/timestreamquery/pom.xml | 2 +-
services/timestreamwrite/pom.xml | 2 +-
services/tnb/pom.xml | 2 +-
services/transcribe/pom.xml | 2 +-
services/transcribestreaming/pom.xml | 2 +-
services/transfer/pom.xml | 2 +-
services/translate/pom.xml | 2 +-
services/trustedadvisor/pom.xml | 2 +-
services/verifiedpermissions/pom.xml | 2 +-
services/voiceid/pom.xml | 2 +-
services/vpclattice/pom.xml | 2 +-
services/waf/pom.xml | 2 +-
services/wafv2/pom.xml | 2 +-
services/wellarchitected/pom.xml | 2 +-
services/wisdom/pom.xml | 2 +-
services/workdocs/pom.xml | 2 +-
services/workmail/pom.xml | 2 +-
services/workmailmessageflow/pom.xml | 2 +-
services/workspaces/pom.xml | 2 +-
services/workspacesthinclient/pom.xml | 2 +-
services/workspacesweb/pom.xml | 2 +-
services/xray/pom.xml | 2 +-
test/auth-tests/pom.xml | 2 +-
.../pom.xml | 2 +-
test/bundle-shading-tests/pom.xml | 2 +-
test/codegen-generated-classes-test/pom.xml | 2 +-
test/crt-unavailable-tests/pom.xml | 2 +-
test/http-client-tests/pom.xml | 2 +-
test/module-path-tests/pom.xml | 2 +-
.../pom.xml | 2 +-
test/protocol-tests-core/pom.xml | 2 +-
test/protocol-tests/pom.xml | 2 +-
test/region-testing/pom.xml | 2 +-
test/ruleset-testing-core/pom.xml | 2 +-
test/s3-benchmarks/pom.xml | 2 +-
test/sdk-benchmarks/pom.xml | 2 +-
test/sdk-native-image-test/pom.xml | 2 +-
test/service-test-utils/pom.xml | 2 +-
test/stability-tests/pom.xml | 2 +-
test/test-utils/pom.xml | 2 +-
test/tests-coverage-reporting/pom.xml | 2 +-
test/v2-migration-tests/pom.xml | 2 +-
third-party/pom.xml | 2 +-
third-party/third-party-jackson-core/pom.xml | 2 +-
.../pom.xml | 2 +-
third-party/third-party-slf4j-api/pom.xml | 2 +-
utils/pom.xml | 2 +-
v2-migration/pom.xml | 2 +-
518 files changed, 838 insertions(+), 674 deletions(-)
create mode 100644 .changes/2.29.24.json
delete mode 100644 .changes/next-release/bugfix-AWSCRTbasedS3client-906e1ea.json
delete mode 100644 .changes/next-release/feature-AWSCleanRoomsService-f293562.json
delete mode 100644 .changes/next-release/feature-AWSInvoicing-e5d5943.json
delete mode 100644 .changes/next-release/feature-AWSOrganizations-42333d1.json
delete mode 100644 .changes/next-release/feature-AWSS3Control-ab425dc.json
delete mode 100644 .changes/next-release/feature-AWSSecurityHub-096bdcc.json
delete mode 100644 .changes/next-release/feature-AWSTransferFamily-5c2b94e.json
delete mode 100644 .changes/next-release/feature-AgentsforAmazonBedrock-35d36fc.json
delete mode 100644 .changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json
delete mode 100644 .changes/next-release/feature-AmazonBedrock-4b0d7bd.json
delete mode 100644 .changes/next-release/feature-AmazonChimeSDKVoice-59cfe26.json
delete mode 100644 .changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json
delete mode 100644 .changes/next-release/feature-AmazonConnectCampaignServiceV2-d4badfa.json
delete mode 100644 .changes/next-release/feature-AmazonConnectCustomerProfiles-836a182.json
delete mode 100644 .changes/next-release/feature-AmazonConnectService-8488c96.json
delete mode 100644 .changes/next-release/feature-AmazonEC2ContainerService-889d675.json
delete mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json
delete mode 100644 .changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json
delete mode 100644 .changes/next-release/feature-AmazonEventBridge-300044d.json
delete mode 100644 .changes/next-release/feature-AmazonFSx-bef794e.json
delete mode 100644 .changes/next-release/feature-AmazonGuardDuty-4ce4a9d.json
delete mode 100644 .changes/next-release/feature-AmazonMemoryDB-d74846d.json
delete mode 100644 .changes/next-release/feature-AmazonOpenSearchService-afd5c96.json
delete mode 100644 .changes/next-release/feature-AmazonQConnect-4057b7d.json
delete mode 100644 .changes/next-release/feature-AmazonRelationalDatabaseService-e21caa4.json
delete mode 100644 .changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json
delete mode 100644 .changes/next-release/feature-AmazonVPCLattice-e1068cd.json
delete mode 100644 .changes/next-release/feature-EC2ImageBuilder-0b74f6d.json
delete mode 100644 .changes/next-release/feature-NetworkFlowMonitor-28a0b5a.json
delete mode 100644 .changes/next-release/feature-QBusiness-05a6cee.json
delete mode 100644 .changes/next-release/feature-SecurityIncidentResponse-214853b.json

diff --git a/.changes/2.29.24.json b/.changes/2.29.24.json
new file mode 100644
index 00000000000..7ec6e9c96cf
--- /dev/null
+++ b/.changes/2.29.24.json
@@ -0,0 +1,192 @@
+{
+    "version": "2.29.24",
+    "date": "2024-12-01",
+    "entries": [
+        {
+            "type": "bugfix",
+            "category": "AWS CRT-based S3 client",
+            "contributor": "",
+            "description": "Fixed an issue where an error was not surfaced if request failed halfway for a GetObject operation. See [#5631](https://github.com/aws/aws-sdk-java-v2/issues/5631)"
+        },
+        {
+            "type": "feature",
+            "category": "AWS Clean Rooms Service",
+            "contributor": "",
+            "description": "This release allows customers and their partners to easily collaborate with data stored in Snowflake and Amazon Athena, without having to move or share their underlying data among collaborators."
+        },
+        {
+            "type": "feature",
+            "category": "AWS Invoicing",
+            "contributor": "",
+            "description": "AWS Invoice Configuration allows you to receive separate AWS invoices based on your organizational needs. You can use the AWS SDKs to manage Invoice Units and programmatically fetch the information of the invoice receiver."
+        },
+        {
+            "type": "feature",
+            "category": "AWS Organizations",
+            "contributor": "",
+            "description": "Add support for policy operations on the DECLARATIVE_POLICY_EC2 policy type."
+        },
+        {
+            "type": "feature",
+            "category": "AWS S3 Control",
+            "contributor": "",
+            "description": "Amazon S3 introduces support for AWS Dedicated Local Zones"
+        },
+        {
+            "type": "feature",
+            "category": "AWS SecurityHub",
+            "contributor": "",
+            "description": "Add new Multi Domain Correlation findings."
+        },
+        {
+            "type": "feature",
+            "category": "AWS Transfer Family",
+            "contributor": "",
+            "description": "AWS Transfer Family now offers Web apps that enable simple and secure access to data stored in Amazon S3."
+        },
+        {
+            "type": "feature",
+            "category": "Agents for Amazon Bedrock",
+            "contributor": "",
+            "description": "This release introduces APIs to upload documents directly into a Knowledge Base"
+        },
+        {
+            "type": "feature",
+            "category": "Agents for Amazon Bedrock Runtime",
+            "contributor": "",
+            "description": "This release introduces a new Rerank API to leverage reranking models (with integration into Knowledge Bases); APIs to upload documents directly into Knowledge Base; RetrieveAndGenerateStream API for streaming response; Guardrails on Retrieve API; and ability to automatically generate filters"
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Bedrock",
+            "contributor": "",
+            "description": "Add support for Knowledge Base Evaluations & LLM as a judge"
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Chime SDK Voice",
+            "contributor": "",
+            "description": "This release adds support for enterprises to integrate Amazon Connect with other voice systems. It supports directly transferring voice calls and metadata without using the public telephone network. It also supports real-time and post-call analytics."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon CloudWatch Logs",
+            "contributor": "",
+            "description": "Adds PutIntegration, GetIntegration, ListIntegrations and DeleteIntegration APIs. Adds QueryLanguage support to StartQuery, GetQueryResults, DescribeQueries, DescribeQueryDefinitions, and PutQueryDefinition APIs."
+        },
+        {
+            "type": "feature",
+            "category": "AmazonConnectCampaignServiceV2",
+            "contributor": "",
+            "description": "Amazon Connect Outbound Campaigns V2 / Features : Adds support for Event-Triggered Campaigns."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Connect Customer Profiles",
+            "contributor": "",
+            "description": "This release introduces Event Trigger APIs as part of Amazon Connect Customer Profiles service."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Connect Service",
+            "contributor": "",
+            "description": "Adds support for WhatsApp Business messaging, IVR call recording, enabling Contact Lens for existing on-premise contact centers and telephony platforms, and enabling telephony and IVR migration to Amazon Connect independent of their contact center agents."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon EC2 Container Service",
+            "contributor": "",
+            "description": "This release adds support for Container Insights with Enhanced Observability for Amazon ECS."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Elastic Compute Cloud",
+            "contributor": "",
+            "description": "Adds support for declarative policies that allow you to enforce desired configuration across an AWS organization through configuring account attributes. Adds support for Allowed AMIs that allows you to limit the use of AMIs in AWS accounts. Adds support for connectivity over non-HTTP protocols."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Elastic Kubernetes Service",
+            "contributor": "",
+            "description": "Added support for Auto Mode Clusters, Hybrid Nodes, and specifying computeTypes in the DescribeAddonVersions API."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon EventBridge",
+            "contributor": "",
+            "description": "Call private APIs by configuring Connections with VPC connectivity through PrivateLink and VPC Lattice"
+        },
+        {
+            "type": "feature",
+            "category": "Amazon FSx",
+            "contributor": "",
+            "description": "FSx API changes to support the public launch of the Amazon FSx Intelligent Tiering for OpenZFS storage class."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon GuardDuty",
+            "contributor": "",
+            "description": "Add new Multi Domain Correlation findings."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon MemoryDB",
+            "contributor": "",
+            "description": "Amazon MemoryDB SDK now supports all APIs for Multi-Region. Please refer to the updated Amazon MemoryDB public documentation for detailed information on API usage."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon OpenSearch Service",
+            "contributor": "",
+            "description": "This feature introduces support for CRUDL APIs, enabling the creation and management of Connected data sources."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Q Connect",
+            "contributor": "",
+            "description": "This release adds the following capabilities: Configuring safeguards via AIGuardrails for Q in Connect inferencing, and APIs to support Q&A self-service use cases"
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Relational Database Service",
+            "contributor": "",
+            "description": "Amazon RDS supports CloudWatch Database Insights. You can use the SDK to create, modify, and describe the DatabaseInsightsMode for your DB instances and clusters."
+        },
+        {
+            "type": "feature",
+            "category": "Amazon Simple Storage Service",
+            "contributor": "",
+            "description": "Amazon S3 introduces support for AWS Dedicated Local Zones"
+        },
+        {
+            "type": "feature",
+            "category": "Amazon VPC Lattice",
+            "contributor": "",
+            "description": "Lattice APIs that allow sharing and access of VPC resources across accounts."
+        },
+        {
+            "type": "feature",
+            "category": "EC2 Image Builder",
+            "contributor": "",
+            "description": "Added support for EC2 Image Builder's integration with AWS Marketplace for Marketplace components."
+        },
+        {
+            "type": "feature",
+            "category": "Network Flow Monitor",
+            "contributor": "",
+            "description": "This release adds documentation for a new feature in Amazon CloudWatch called Network Flow Monitor. You can use Network Flow Monitor to get near real-time metrics, including retransmissions and data transferred, for your actual workloads."
+        },
+        {
+            "type": "feature",
+            "category": "QBusiness",
+            "contributor": "",
+            "description": "Amazon Q Business now supports capabilities to extract insights and answer questions from visual elements embedded within documents, a browser extension for Google Chrome, Mozilla Firefox, and Microsoft Edge, and attachments across conversations."
+        },
+        {
+            "type": "feature",
+            "category": "Security Incident Response",
+            "contributor": "",
+            "description": "AWS Security Incident Response is a purpose-built security incident solution designed to help customers prepare for, respond to, and recover from security incidents."
+        }
+    ]
+}
\ No newline at end of file
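To illustrate the "AWS CRT-based S3 client" bugfix recorded in the new 2.29.24 changelog entry above, here is a minimal, hedged sketch of the affected call path with the AWS SDK for Java v2. The bucket name, key, and download path are placeholders, not values from this patch; with the fix, a GetObject transfer that fails partway through should complete the returned future exceptionally rather than appearing to succeed.

```java
import java.nio.file.Paths;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

import software.amazon.awssdk.core.async.AsyncResponseTransformer;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;

public class CrtGetObjectSketch {
    public static void main(String[] args) {
        // AWS CRT-based S3 async client; requires the aws-crt-client module on the classpath.
        try (S3AsyncClient s3 = S3AsyncClient.crtBuilder().build()) {
            GetObjectRequest request = GetObjectRequest.builder()
                    .bucket("example-bucket")      // placeholder bucket name
                    .key("example-object.bin")     // placeholder key
                    .build();

            CompletableFuture<GetObjectResponse> download =
                    s3.getObject(request, AsyncResponseTransformer.toFile(Paths.get("example-object.bin")));

            try {
                // Per the fix, a mid-transfer failure now surfaces here as an exception
                // instead of being silently dropped.
                GetObjectResponse response = download.join();
                System.out.println("Downloaded object, ETag: " + response.eTag());
            } catch (CompletionException e) {
                System.err.println("GetObject failed: " + e.getCause());
            }
        }
    }
}
```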
diff --git a/.changes/next-release/bugfix-AWSCRTbasedS3client-906e1ea.json b/.changes/next-release/bugfix-AWSCRTbasedS3client-906e1ea.json
deleted file mode 100644
index ca4c0d04c99..00000000000
--- a/.changes/next-release/bugfix-AWSCRTbasedS3client-906e1ea.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "bugfix",
-    "category": "AWS CRT-based S3 client",
-    "contributor": "",
-    "description": "Fixed an issue where an error was not surfaced if request failed halfway for a GetObject operation. See [#5631](https://github.com/aws/aws-sdk-java-v2/issues/5631)"
-}
diff --git a/.changes/next-release/feature-AWSCleanRoomsService-f293562.json b/.changes/next-release/feature-AWSCleanRoomsService-f293562.json
deleted file mode 100644
index 9dc323edca6..00000000000
--- a/.changes/next-release/feature-AWSCleanRoomsService-f293562.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AWS Clean Rooms Service",
-    "contributor": "",
-    "description": "This release allows customers and their partners to easily collaborate with data stored in Snowflake and Amazon Athena, without having to move or share their underlying data among collaborators."
-}
diff --git a/.changes/next-release/feature-AWSInvoicing-e5d5943.json b/.changes/next-release/feature-AWSInvoicing-e5d5943.json
deleted file mode 100644
index 351fd25f6c8..00000000000
--- a/.changes/next-release/feature-AWSInvoicing-e5d5943.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AWS Invoicing",
-    "contributor": "",
-    "description": "AWS Invoice Configuration allows you to receive separate AWS invoices based on your organizational needs. You can use the AWS SDKs to manage Invoice Units and programmatically fetch the information of the invoice receiver."
-}
diff --git a/.changes/next-release/feature-AWSOrganizations-42333d1.json b/.changes/next-release/feature-AWSOrganizations-42333d1.json
deleted file mode 100644
index d7a1aa22540..00000000000
--- a/.changes/next-release/feature-AWSOrganizations-42333d1.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AWS Organizations",
-    "contributor": "",
-    "description": "Add support for policy operations on the DECLARATIVE_POLICY_EC2 policy type."
-}
diff --git a/.changes/next-release/feature-AWSS3Control-ab425dc.json b/.changes/next-release/feature-AWSS3Control-ab425dc.json
deleted file mode 100644
index 078bc4e5004..00000000000
--- a/.changes/next-release/feature-AWSS3Control-ab425dc.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AWS S3 Control",
-    "contributor": "",
-    "description": "Amazon S3 introduces support for AWS Dedicated Local Zones"
-}
diff --git a/.changes/next-release/feature-AWSSecurityHub-096bdcc.json b/.changes/next-release/feature-AWSSecurityHub-096bdcc.json
deleted file mode 100644
index d5c8113589c..00000000000
--- a/.changes/next-release/feature-AWSSecurityHub-096bdcc.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AWS SecurityHub",
-    "contributor": "",
-    "description": "Add new Multi Domain Correlation findings."
-}
diff --git a/.changes/next-release/feature-AWSTransferFamily-5c2b94e.json b/.changes/next-release/feature-AWSTransferFamily-5c2b94e.json
deleted file mode 100644
index bf7b01b7939..00000000000
--- a/.changes/next-release/feature-AWSTransferFamily-5c2b94e.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AWS Transfer Family",
-    "contributor": "",
-    "description": "AWS Transfer Family now offers Web apps that enables simple and secure access to data stored in Amazon S3."
-}
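The "AWS Organizations" entry in this release adds policy operations for the DECLARATIVE_POLICY_EC2 policy type. A small, hedged sketch of listing policies of that type with the SDK for Java v2; the policy-type string comes from the changelog entry itself, and credentials and region are taken from the default provider chain.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.ListPoliciesRequest;
import software.amazon.awssdk.services.organizations.model.ListPoliciesResponse;
import software.amazon.awssdk.services.organizations.model.PolicySummary;

public class ListDeclarativeEc2Policies {
    public static void main(String[] args) {
        try (OrganizationsClient organizations = OrganizationsClient.create()) {
            // Filter value taken from the changelog entry; the generated client also
            // accepts the typed PolicyType enum once the updated model is on the classpath.
            ListPoliciesResponse response = organizations.listPolicies(
                    ListPoliciesRequest.builder()
                            .filter("DECLARATIVE_POLICY_EC2")
                            .build());

            for (PolicySummary policy : response.policies()) {
                System.out.println(policy.id() + " " + policy.name());
            }
        }
    }
}
```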
diff --git a/.changes/next-release/feature-AgentsforAmazonBedrock-35d36fc.json b/.changes/next-release/feature-AgentsforAmazonBedrock-35d36fc.json
deleted file mode 100644
index 0cf4314aba3..00000000000
--- a/.changes/next-release/feature-AgentsforAmazonBedrock-35d36fc.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Agents for Amazon Bedrock",
-    "contributor": "",
-    "description": "This release introduces APIs to upload documents directly into a Knowledge Base"
-}
diff --git a/.changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json b/.changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json
deleted file mode 100644
index c6fbb0457dc..00000000000
--- a/.changes/next-release/feature-AgentsforAmazonBedrockRuntime-70183c9.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Agents for Amazon Bedrock Runtime",
-    "contributor": "",
-    "description": "This release introduces a new Rerank API to leverage reranking models (with integration into Knowledge Bases); APIs to upload documents directly into Knowledge Base; RetrieveAndGenerateStream API for streaming response; Guardrails on Retrieve API; and ability to automatically generate filters"
-}
diff --git a/.changes/next-release/feature-AmazonBedrock-4b0d7bd.json b/.changes/next-release/feature-AmazonBedrock-4b0d7bd.json
deleted file mode 100644
index c7f9f3a165f..00000000000
--- a/.changes/next-release/feature-AmazonBedrock-4b0d7bd.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Bedrock",
-    "contributor": "",
-    "description": "Add support for Knowledge Base Evaluations & LLM as a judge"
-}
diff --git a/.changes/next-release/feature-AmazonChimeSDKVoice-59cfe26.json b/.changes/next-release/feature-AmazonChimeSDKVoice-59cfe26.json
deleted file mode 100644
index a70956b6d01..00000000000
--- a/.changes/next-release/feature-AmazonChimeSDKVoice-59cfe26.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Chime SDK Voice",
-    "contributor": "",
-    "description": "This release adds supports for enterprises to integrate Amazon Connect with other voice systems. It supports directly transferring voice calls and metadata without using the public telephone network. It also supports real-time and post-call analytics."
-}
diff --git a/.changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json b/.changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json
deleted file mode 100644
index 65a06e6052d..00000000000
--- a/.changes/next-release/feature-AmazonCloudWatchLogs-dd286dc.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon CloudWatch Logs",
-    "contributor": "",
-    "description": "Adds PutIntegration, GetIntegration, ListIntegrations and DeleteIntegration APIs. Adds QueryLanguage support to StartQuery, GetQueryResults, DescribeQueries, DescribeQueryDefinitions, and PutQueryDefinition APIs."
-}
diff --git a/.changes/next-release/feature-AmazonConnectCampaignServiceV2-d4badfa.json b/.changes/next-release/feature-AmazonConnectCampaignServiceV2-d4badfa.json
deleted file mode 100644
index 59642837e4f..00000000000
--- a/.changes/next-release/feature-AmazonConnectCampaignServiceV2-d4badfa.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "AmazonConnectCampaignServiceV2",
-    "contributor": "",
-    "description": "Amazon Connect Outbound Campaigns V2 / Features : Adds support for Event-Triggered Campaigns."
-}
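The "Amazon CloudWatch Logs" entry in this release mentions QueryLanguage support on StartQuery. A hedged sketch with the SDK for Java v2 follows; the queryLanguage setter and the "SQL" value are assumptions inferred from the entry and the SDK's usual codegen naming, and the log group name and query text are placeholders.

```java
import java.time.Instant;

import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryResponse;

public class StartQueryWithLanguageSketch {
    public static void main(String[] args) {
        try (CloudWatchLogsClient logs = CloudWatchLogsClient.create()) {
            StartQueryResponse started = logs.startQuery(StartQueryRequest.builder()
                    .logGroupName("/aws/lambda/example-function")   // placeholder log group
                    .startTime(Instant.now().minusSeconds(3600).getEpochSecond())
                    .endTime(Instant.now().getEpochSecond())
                    // Assumed setter for the new QueryLanguage member; allowed values per the service docs.
                    .queryLanguage("SQL")
                    // Placeholder query text; syntax depends on the chosen query language.
                    .queryString("SELECT count(*) FROM `/aws/lambda/example-function`")
                    .build());

            System.out.println("Started query: " + started.queryId());
        }
    }
}
```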
diff --git a/.changes/next-release/feature-AmazonConnectCustomerProfiles-836a182.json b/.changes/next-release/feature-AmazonConnectCustomerProfiles-836a182.json
deleted file mode 100644
index e27a8faa841..00000000000
--- a/.changes/next-release/feature-AmazonConnectCustomerProfiles-836a182.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Connect Customer Profiles",
-    "contributor": "",
-    "description": "This release introduces Event Trigger APIs as part of Amazon Connect Customer Profiles service."
-}
diff --git a/.changes/next-release/feature-AmazonConnectService-8488c96.json b/.changes/next-release/feature-AmazonConnectService-8488c96.json
deleted file mode 100644
index 2aa68ed37e2..00000000000
--- a/.changes/next-release/feature-AmazonConnectService-8488c96.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Connect Service",
-    "contributor": "",
-    "description": "Adds support for WhatsApp Business messaging, IVR call recording, enabling Contact Lens for existing on-premise contact centers and telephony platforms, and enabling telephony and IVR migration to Amazon Connect independent of their contact center agents."
-}
diff --git a/.changes/next-release/feature-AmazonEC2ContainerService-889d675.json b/.changes/next-release/feature-AmazonEC2ContainerService-889d675.json
deleted file mode 100644
index de5346ab27c..00000000000
--- a/.changes/next-release/feature-AmazonEC2ContainerService-889d675.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon EC2 Container Service",
-    "contributor": "",
-    "description": "This release adds support for Container Insights with Enhanced Observability for Amazon ECS."
-}
diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json b/.changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json
deleted file mode 100644
index b02223c87bd..00000000000
--- a/.changes/next-release/feature-AmazonElasticComputeCloud-c98c8a3.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Elastic Compute Cloud",
-    "contributor": "",
-    "description": "Adds support for declarative policies that allow you to enforce desired configuration across an AWS organization through configuring account attributes. Adds support for Allowed AMIs that allows you to limit the use of AMIs in AWS accounts. Adds support for connectivity over non-HTTP protocols."
-}
diff --git a/.changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json b/.changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json
deleted file mode 100644
index c1fb832ade5..00000000000
--- a/.changes/next-release/feature-AmazonElasticKubernetesService-d3cdaff.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Elastic Kubernetes Service",
-    "contributor": "",
-    "description": "Added support for Auto Mode Clusters, Hybrid Nodes, and specifying computeTypes in the DescribeAddonVersions API."
-}
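The "Amazon EC2 Container Service" entry in this release adds Container Insights with Enhanced Observability. A hedged sketch of opting a cluster in via the SDK for Java v2; the cluster name is a placeholder and the "enhanced" setting value is an assumption based on the feature announcement rather than on this patch.

```java
import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.ClusterSetting;
import software.amazon.awssdk.services.ecs.model.ClusterSettingName;
import software.amazon.awssdk.services.ecs.model.UpdateClusterSettingsRequest;

public class EnableEnhancedContainerInsights {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            // Toggle the containerInsights cluster setting; "enhanced" is the assumed
            // value for Container Insights with Enhanced Observability.
            ecs.updateClusterSettings(UpdateClusterSettingsRequest.builder()
                    .cluster("example-cluster")    // placeholder cluster name
                    .settings(ClusterSetting.builder()
                            .name(ClusterSettingName.CONTAINER_INSIGHTS)
                            .value("enhanced")
                            .build())
                    .build());
        }
    }
}
```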
diff --git a/.changes/next-release/feature-AmazonEventBridge-300044d.json b/.changes/next-release/feature-AmazonEventBridge-300044d.json
deleted file mode 100644
index 89ec407cacc..00000000000
--- a/.changes/next-release/feature-AmazonEventBridge-300044d.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon EventBridge",
-    "contributor": "",
-    "description": "Call private APIs by configuring Connections with VPC connectivity through PrivateLink and VPC Lattice"
-}
diff --git a/.changes/next-release/feature-AmazonFSx-bef794e.json b/.changes/next-release/feature-AmazonFSx-bef794e.json
deleted file mode 100644
index c74ac64fbad..00000000000
--- a/.changes/next-release/feature-AmazonFSx-bef794e.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon FSx",
-    "contributor": "",
-    "description": "FSx API changes to support the public launch of the Amazon FSx Intelligent Tiering for OpenZFS storage class."
-}
diff --git a/.changes/next-release/feature-AmazonGuardDuty-4ce4a9d.json b/.changes/next-release/feature-AmazonGuardDuty-4ce4a9d.json
deleted file mode 100644
index 0e32749c5fa..00000000000
--- a/.changes/next-release/feature-AmazonGuardDuty-4ce4a9d.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon GuardDuty",
-    "contributor": "",
-    "description": "Add new Multi Domain Correlation findings."
-}
diff --git a/.changes/next-release/feature-AmazonMemoryDB-d74846d.json b/.changes/next-release/feature-AmazonMemoryDB-d74846d.json
deleted file mode 100644
index a3310f28732..00000000000
--- a/.changes/next-release/feature-AmazonMemoryDB-d74846d.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon MemoryDB",
-    "contributor": "",
-    "description": "Amazon MemoryDB SDK now supports all APIs for Multi-Region. Please refer to the updated Amazon MemoryDB public documentation for detailed information on API usage."
-}
diff --git a/.changes/next-release/feature-AmazonOpenSearchService-afd5c96.json b/.changes/next-release/feature-AmazonOpenSearchService-afd5c96.json
deleted file mode 100644
index a7f3c1dfdbe..00000000000
--- a/.changes/next-release/feature-AmazonOpenSearchService-afd5c96.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon OpenSearch Service",
-    "contributor": "",
-    "description": "This feature introduces support for CRUDL APIs, enabling the creation and management of Connected data sources."
-}
diff --git a/.changes/next-release/feature-AmazonQConnect-4057b7d.json b/.changes/next-release/feature-AmazonQConnect-4057b7d.json
deleted file mode 100644
index 26f62ed5ed9..00000000000
--- a/.changes/next-release/feature-AmazonQConnect-4057b7d.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Q Connect",
-    "contributor": "",
-    "description": "This release adds following capabilities: Configuring safeguards via AIGuardrails for Q in Connect inferencing, and APIs to support Q&A self-service use cases"
-}
diff --git a/.changes/next-release/feature-AmazonRelationalDatabaseService-e21caa4.json b/.changes/next-release/feature-AmazonRelationalDatabaseService-e21caa4.json
deleted file mode 100644
index 5b286eeaaa3..00000000000
--- a/.changes/next-release/feature-AmazonRelationalDatabaseService-e21caa4.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Relational Database Service",
-    "contributor": "",
-    "description": "Amazon RDS supports CloudWatch Database Insights. You can use the SDK to create, modify, and describe the DatabaseInsightsMode for your DB instances and clusters."
-}
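The "Amazon Relational Database Service" entry in this release exposes a DatabaseInsightsMode setting. A hedged sketch of modifying it on an existing instance with the SDK for Java v2; the databaseInsightsMode setter name follows the SDK's usual codegen naming for the member named in the entry, the "advanced" value is an assumption from the CloudWatch Database Insights documentation, and the instance identifier is a placeholder.

```java
import software.amazon.awssdk.services.rds.RdsClient;
import software.amazon.awssdk.services.rds.model.ModifyDbInstanceRequest;

public class SetDatabaseInsightsMode {
    public static void main(String[] args) {
        try (RdsClient rds = RdsClient.create()) {
            rds.modifyDbInstance(ModifyDbInstanceRequest.builder()
                    .dbInstanceIdentifier("example-db-instance")   // placeholder identifier
                    // Assumed setter for the DatabaseInsightsMode member named in the changelog entry.
                    .databaseInsightsMode("advanced")
                    .applyImmediately(true)
                    .build());
        }
    }
}
```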
diff --git a/.changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json b/.changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json
deleted file mode 100644
index 3586cd88780..00000000000
--- a/.changes/next-release/feature-AmazonSimpleStorageService-36d4cfc.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon Simple Storage Service",
-    "contributor": "",
-    "description": "Amazon S3 introduces support for AWS Dedicated Local Zones"
-}
diff --git a/.changes/next-release/feature-AmazonVPCLattice-e1068cd.json b/.changes/next-release/feature-AmazonVPCLattice-e1068cd.json
deleted file mode 100644
index 44aa0914c3c..00000000000
--- a/.changes/next-release/feature-AmazonVPCLattice-e1068cd.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Amazon VPC Lattice",
-    "contributor": "",
-    "description": "Lattice APIs that allow sharing and access of VPC resources across accounts."
-}
diff --git a/.changes/next-release/feature-EC2ImageBuilder-0b74f6d.json b/.changes/next-release/feature-EC2ImageBuilder-0b74f6d.json
deleted file mode 100644
index 34cee33e36d..00000000000
--- a/.changes/next-release/feature-EC2ImageBuilder-0b74f6d.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "EC2 Image Builder",
-    "contributor": "",
-    "description": "Added support for EC2 Image Builder's integration with AWS Marketplace for Marketplace components."
-}
diff --git a/.changes/next-release/feature-NetworkFlowMonitor-28a0b5a.json b/.changes/next-release/feature-NetworkFlowMonitor-28a0b5a.json
deleted file mode 100644
index eccd37c6033..00000000000
--- a/.changes/next-release/feature-NetworkFlowMonitor-28a0b5a.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Network Flow Monitor",
-    "contributor": "",
-    "description": "This release adds documentation for a new feature in Amazon CloudWatch called Network Flow Monitor. You can use Network Flow Monitor to get near real-time metrics, including retransmissions and data transferred, for your actual workloads."
-}
diff --git a/.changes/next-release/feature-QBusiness-05a6cee.json b/.changes/next-release/feature-QBusiness-05a6cee.json
deleted file mode 100644
index 2d461e0fb7d..00000000000
--- a/.changes/next-release/feature-QBusiness-05a6cee.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "QBusiness",
-    "contributor": "",
-    "description": "Amazon Q Business now supports capabilities to extract insights and answer questions from visual elements embedded within documents, a browser extension for Google Chrome, Mozilla Firefox, and Microsoft Edge, and attachments across conversations."
-}
diff --git a/.changes/next-release/feature-SecurityIncidentResponse-214853b.json b/.changes/next-release/feature-SecurityIncidentResponse-214853b.json
deleted file mode 100644
index 6455b3f59f1..00000000000
--- a/.changes/next-release/feature-SecurityIncidentResponse-214853b.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "feature",
-    "category": "Security Incident Response",
-    "contributor": "",
-    "description": "AWS Security Incident Response is a purpose-built security incident solution designed to help customers prepare for, respond to, and recover from security incidents."
-}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 088e1f636f5..977cc5fabae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,129 @@
 #### 👋 _Looking for changelogs for older versions? You can find them in the [changelogs](./changelogs) directory._
+# __2.29.24__ __2024-12-01__
+## __AWS CRT-based S3 client__
+  - ### Bugfixes
+    - Fixed an issue where an error was not surfaced if request failed halfway for a GetObject operation. See [#5631](https://github.com/aws/aws-sdk-java-v2/issues/5631)
+
+## __AWS Clean Rooms Service__
+  - ### Features
+    - This release allows customers and their partners to easily collaborate with data stored in Snowflake and Amazon Athena, without having to move or share their underlying data among collaborators.
+
+## __AWS Invoicing__
+  - ### Features
+    - AWS Invoice Configuration allows you to receive separate AWS invoices based on your organizational needs. You can use the AWS SDKs to manage Invoice Units and programmatically fetch the information of the invoice receiver.
+
+## __AWS Organizations__
+  - ### Features
+    - Add support for policy operations on the DECLARATIVE_POLICY_EC2 policy type.
+
+## __AWS S3 Control__
+  - ### Features
+    - Amazon S3 introduces support for AWS Dedicated Local Zones
+
+## __AWS SecurityHub__
+  - ### Features
+    - Add new Multi Domain Correlation findings.
+
+## __AWS Transfer Family__
+  - ### Features
+    - AWS Transfer Family now offers Web apps that enable simple and secure access to data stored in Amazon S3.
+
+## __Agents for Amazon Bedrock__
+  - ### Features
+    - This release introduces APIs to upload documents directly into a Knowledge Base
+
+## __Agents for Amazon Bedrock Runtime__
+  - ### Features
+    - This release introduces a new Rerank API to leverage reranking models (with integration into Knowledge Bases); APIs to upload documents directly into Knowledge Base; RetrieveAndGenerateStream API for streaming response; Guardrails on Retrieve API; and ability to automatically generate filters
+
+## __Amazon Bedrock__
+  - ### Features
+    - Add support for Knowledge Base Evaluations & LLM as a judge
+
+## __Amazon Chime SDK Voice__
+  - ### Features
+    - This release adds support for enterprises to integrate Amazon Connect with other voice systems. It supports directly transferring voice calls and metadata without using the public telephone network. It also supports real-time and post-call analytics.
+
+## __Amazon CloudWatch Logs__
+  - ### Features
+    - Adds PutIntegration, GetIntegration, ListIntegrations and DeleteIntegration APIs. Adds QueryLanguage support to StartQuery, GetQueryResults, DescribeQueries, DescribeQueryDefinitions, and PutQueryDefinition APIs.
+
+## __Amazon Connect Customer Profiles__
+  - ### Features
+    - This release introduces Event Trigger APIs as part of Amazon Connect Customer Profiles service.
+
+## __Amazon Connect Service__
+  - ### Features
+    - Adds support for WhatsApp Business messaging, IVR call recording, enabling Contact Lens for existing on-premise contact centers and telephony platforms, and enabling telephony and IVR migration to Amazon Connect independent of their contact center agents.
+
+## __Amazon EC2 Container Service__
+  - ### Features
+    - This release adds support for Container Insights with Enhanced Observability for Amazon ECS.
+
+## __Amazon Elastic Compute Cloud__
+  - ### Features
+    - Adds support for declarative policies that allow you to enforce desired configuration across an AWS organization through configuring account attributes. Adds support for Allowed AMIs that allows you to limit the use of AMIs in AWS accounts. Adds support for connectivity over non-HTTP protocols.
+
+## __Amazon Elastic Kubernetes Service__
+  - ### Features
+    - Added support for Auto Mode Clusters, Hybrid Nodes, and specifying computeTypes in the DescribeAddonVersions API.
+
+## __Amazon EventBridge__
+  - ### Features
+    - Call private APIs by configuring Connections with VPC connectivity through PrivateLink and VPC Lattice
+
+## __Amazon FSx__
+  - ### Features
+    - FSx API changes to support the public launch of the Amazon FSx Intelligent Tiering for OpenZFS storage class.
+
+## __Amazon GuardDuty__
+  - ### Features
+    - Add new Multi Domain Correlation findings.
+
+## __Amazon MemoryDB__
+  - ### Features
+    - Amazon MemoryDB SDK now supports all APIs for Multi-Region. Please refer to the updated Amazon MemoryDB public documentation for detailed information on API usage.
+
+## __Amazon OpenSearch Service__
+  - ### Features
+    - This feature introduces support for CRUDL APIs, enabling the creation and management of Connected data sources.
+
+## __Amazon Q Connect__
+  - ### Features
+    - This release adds the following capabilities: Configuring safeguards via AIGuardrails for Q in Connect inferencing, and APIs to support Q&A self-service use cases
+
+## __Amazon Relational Database Service__
+  - ### Features
+    - Amazon RDS supports CloudWatch Database Insights. You can use the SDK to create, modify, and describe the DatabaseInsightsMode for your DB instances and clusters.
+
+## __Amazon Simple Storage Service__
+  - ### Features
+    - Amazon S3 introduces support for AWS Dedicated Local Zones
+
+## __Amazon VPC Lattice__
+  - ### Features
+    - Lattice APIs that allow sharing and access of VPC resources across accounts.
+
+## __AmazonConnectCampaignServiceV2__
+  - ### Features
+    - Amazon Connect Outbound Campaigns V2 / Features : Adds support for Event-Triggered Campaigns.
+
+## __EC2 Image Builder__
+  - ### Features
+    - Added support for EC2 Image Builder's integration with AWS Marketplace for Marketplace components.
+
+## __Network Flow Monitor__
+  - ### Features
+    - This release adds documentation for a new feature in Amazon CloudWatch called Network Flow Monitor. You can use Network Flow Monitor to get near real-time metrics, including retransmissions and data transferred, for your actual workloads.
+
+## __QBusiness__
+  - ### Features
+    - Amazon Q Business now supports capabilities to extract insights and answer questions from visual elements embedded within documents, a browser extension for Google Chrome, Mozilla Firefox, and Microsoft Edge, and attachments across conversations.
+
+## __Security Incident Response__
+  - ### Features
+    - AWS Security Incident Response is a purpose-built security incident solution designed to help customers prepare for, respond to, and recover from security incidents.
+
 # __2.29.23__ __2024-11-27__
 ## __AWS Config__
   - ### Features
diff --git a/README.md b/README.md
index 487d613ee6f..965c5253a7e 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ To automatically manage module versions (currently all modules have the same ver